author     kan <kan@FreeBSD.org>  2007-05-19 01:19:51 +0000
committer  kan <kan@FreeBSD.org>  2007-05-19 01:19:51 +0000
commit     1f9ea4d0a40cca64d60cf4dab152349da7b9dddf (patch)
tree       0cb530c9c38af219e6dda2994c078b6b2b9ad853 /contrib/gcc/config/rs6000
parent     4895159b2b4f648051c1f139faa7b6dc50c2bfcb (diff)
GCC 4.2.0 release.
Diffstat (limited to 'contrib/gcc/config/rs6000')
 contrib/gcc/config/rs6000/40x.md                 |    32
 contrib/gcc/config/rs6000/440.md                 |    25
 contrib/gcc/config/rs6000/603.md                 |    29
 contrib/gcc/config/rs6000/6xx.md                 |    48
 contrib/gcc/config/rs6000/7450.md                |    36
 contrib/gcc/config/rs6000/7xx.md                 |    34
 contrib/gcc/config/rs6000/8540.md                |    30
 contrib/gcc/config/rs6000/aix.h                  |   102
 contrib/gcc/config/rs6000/aix.opt                |    25
 contrib/gcc/config/rs6000/aix41.h                |    32
 contrib/gcc/config/rs6000/aix41.opt              |    25
 contrib/gcc/config/rs6000/aix43.h                |    54
 contrib/gcc/config/rs6000/aix51.h                |    53
 contrib/gcc/config/rs6000/aix52.h                |    68
 contrib/gcc/config/rs6000/aix64.opt              |    33
 contrib/gcc/config/rs6000/altivec.h              | 11760
 contrib/gcc/config/rs6000/altivec.md             |  2337
 contrib/gcc/config/rs6000/beos.h                 |     7
 contrib/gcc/config/rs6000/biarch64.h             |     4
 contrib/gcc/config/rs6000/constraints.md         |   162
 contrib/gcc/config/rs6000/crtsavres.asm          |     4
 contrib/gcc/config/rs6000/darwin-asm.h           |    61
 contrib/gcc/config/rs6000/darwin-fallback.c      |   471
 contrib/gcc/config/rs6000/darwin-fpsave.asm      |   102
 contrib/gcc/config/rs6000/darwin-ldouble-format  |    84
 contrib/gcc/config/rs6000/darwin-ldouble.c       |   396
 contrib/gcc/config/rs6000/darwin-libgcc.10.4.ver |    76
 contrib/gcc/config/rs6000/darwin-libgcc.10.5.ver |    89
 contrib/gcc/config/rs6000/darwin-tramp.asm       |    48
 contrib/gcc/config/rs6000/darwin-unwind.h        |    35
 contrib/gcc/config/rs6000/darwin-vecsave.asm     |   165
 contrib/gcc/config/rs6000/darwin-world.asm       |   269
 contrib/gcc/config/rs6000/darwin.h               |   308
 contrib/gcc/config/rs6000/darwin.md              |   440
 contrib/gcc/config/rs6000/darwin.opt             |    33
 contrib/gcc/config/rs6000/darwin64.h             |    36
 contrib/gcc/config/rs6000/darwin7.h              |    31
 contrib/gcc/config/rs6000/darwin8.h              |    33
 contrib/gcc/config/rs6000/default64.h            |     9
 contrib/gcc/config/rs6000/e500-double.h          |    25
 contrib/gcc/config/rs6000/eabi-ci.asm            |     4
 contrib/gcc/config/rs6000/eabi-cn.asm            |     4
 contrib/gcc/config/rs6000/eabi.asm               |     4
 contrib/gcc/config/rs6000/eabi.h                 |    12
 contrib/gcc/config/rs6000/eabialtivec.h          |     4
 contrib/gcc/config/rs6000/eabisim.h              |     4
 contrib/gcc/config/rs6000/eabispe.h              |    23
 contrib/gcc/config/rs6000/freebsd.h              |     4
 contrib/gcc/config/rs6000/gnu.h                  |     4
 contrib/gcc/config/rs6000/host-darwin.c          |    93
 contrib/gcc/config/rs6000/host-ppc64-darwin.c    |    31
 contrib/gcc/config/rs6000/kaos-ppc.h             |     4
 contrib/gcc/config/rs6000/libgcc-ppc-glibc.ver   |    52
 contrib/gcc/config/rs6000/linux-unwind.h         |   466
 contrib/gcc/config/rs6000/linux.h                |    52
 contrib/gcc/config/rs6000/linux64.h              |   196
 contrib/gcc/config/rs6000/linux64.opt            |    25
 contrib/gcc/config/rs6000/linuxaltivec.h         |     4
 contrib/gcc/config/rs6000/linuxspe.h             |    28
 contrib/gcc/config/rs6000/lynx.h                 |   166
 contrib/gcc/config/rs6000/mpc.md                 |    23
 contrib/gcc/config/rs6000/netbsd.h               |     4
 contrib/gcc/config/rs6000/power4.md              |   100
 contrib/gcc/config/rs6000/power5.md              |    78
 contrib/gcc/config/rs6000/ppc64-fp.c             |    60
 contrib/gcc/config/rs6000/predicates.md          |  1307
 contrib/gcc/config/rs6000/rios1.md               |    25
 contrib/gcc/config/rs6000/rios2.md               |    23
 contrib/gcc/config/rs6000/rs6000-c.c             |  2476
 contrib/gcc/config/rs6000/rs6000-modes.def       |    13
 contrib/gcc/config/rs6000/rs6000-protos.h        |   152
 contrib/gcc/config/rs6000/rs6000.c               | 10891
 contrib/gcc/config/rs6000/rs6000.h               |  1325
 contrib/gcc/config/rs6000/rs6000.md              |  5558
 contrib/gcc/config/rs6000/rs6000.opt             |   247
 contrib/gcc/config/rs6000/rs64.md                |    33
 contrib/gcc/config/rs6000/rtems.h                |     6
 contrib/gcc/config/rs6000/secureplt.h            |    21
 contrib/gcc/config/rs6000/sfp-machine.h          |    63
 contrib/gcc/config/rs6000/sol-ci.asm             |     4
 contrib/gcc/config/rs6000/sol-cn.asm             |     4
 contrib/gcc/config/rs6000/spe.h                  |     6
 contrib/gcc/config/rs6000/spe.md                 |   352
 contrib/gcc/config/rs6000/sync.md                |   625
 contrib/gcc/config/rs6000/sysv4.h                |   383
 contrib/gcc/config/rs6000/sysv4.opt              |   149
 contrib/gcc/config/rs6000/sysv4le.h              |     4
 contrib/gcc/config/rs6000/t-aix43                |    26
 contrib/gcc/config/rs6000/t-aix52                |    26
 contrib/gcc/config/rs6000/t-beos                 |     3
 contrib/gcc/config/rs6000/t-darwin               |    40
 contrib/gcc/config/rs6000/t-darwin8              |     3
 contrib/gcc/config/rs6000/t-fprules              |    13
 contrib/gcc/config/rs6000/t-fprules-fpbit        |    11
 contrib/gcc/config/rs6000/t-fprules-softfp       |     6
 contrib/gcc/config/rs6000/t-linux64              |    38
 contrib/gcc/config/rs6000/t-lynx                 |    38
 contrib/gcc/config/rs6000/t-ppccomm              |    10
 contrib/gcc/config/rs6000/t-rtems                |    89
 contrib/gcc/config/rs6000/t-vxworks              |    15
 contrib/gcc/config/rs6000/t-vxworksae            |     5
 contrib/gcc/config/rs6000/tramp.asm              |    14
 contrib/gcc/config/rs6000/vxworks.h              |   181
 contrib/gcc/config/rs6000/vxworksae.h            |    24
 contrib/gcc/config/rs6000/windiss.h              |     4
 contrib/gcc/config/rs6000/x-darwin               |     8
 contrib/gcc/config/rs6000/x-darwin64             |     4
 contrib/gcc/config/rs6000/xcoff.h                |   113
108 files changed, 21502 insertions, 21892 deletions
diff --git a/contrib/gcc/config/rs6000/40x.md b/contrib/gcc/config/rs6000/40x.md
index 9d229b4..94b6c45 100644
--- a/contrib/gcc/config/rs6000/40x.md
+++ b/contrib/gcc/config/rs6000/40x.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM PowerPC 403 and PowerPC 405 processors.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,22 +15,24 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
-(define_automaton "ppc40x")
-(define_cpu_unit "iu_40x,bpu_40x,fpu_405" "ppc40x")
+(define_automaton "ppc40x,ppc40xiu")
+(define_cpu_unit "bpu_40x,fpu_405" "ppc40x")
+(define_cpu_unit "iu_40x" "ppc40xiu")
;; PPC401 / PPC403 / PPC405 32-bit integer only IU BPU
;; Embedded PowerPC controller
;; In-order execution
;; Max issue two insns/cycle (includes one branch)
(define_insn_reservation "ppc403-load" 2
- (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
(eq_attr "cpu" "ppc403,ppc405"))
"iu_40x")
-(define_insn_reservation "ppc403-store" 1
+(define_insn_reservation "ppc403-store" 2
(and (eq_attr "type" "store,store_ux,store_u")
(eq_attr "cpu" "ppc403,ppc405"))
"iu_40x")
@@ -40,6 +42,16 @@
(eq_attr "cpu" "ppc403,ppc405"))
"iu_40x")
+(define_insn_reservation "ppc403-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x")
+
+(define_insn_reservation "ppc403-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x,iu_40x")
+
(define_insn_reservation "ppc403-compare" 3
(and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
(eq_attr "cpu" "ppc403,ppc405"))
@@ -91,7 +103,7 @@
"iu_40x")
(define_insn_reservation "ppc403-jmpreg" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "ppc403,ppc405"))
"bpu_40x")
@@ -101,7 +113,7 @@
"bpu_40x")
(define_insn_reservation "ppc405-float" 11
- (and (eq_attr "type" "fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,fpcompare,fp,dmul,sdiv,ddiv")
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,\
+ fpcompare,fp,dmul,sdiv,ddiv")
(eq_attr "cpu" "ppc405"))
"fpu_405*10")
-
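For context: these files are written in GCC's machine-description pipeline language, where define_insn_reservation names an insn class, gives the latency its consumers see, a condition over insn attributes, and a reservation string in which "," advances one cycle. The following is a minimal sketch using a hypothetical toy automaton (not part of this patch), showing what reservations like "iu_40x,iu_40x" above express:

(define_automaton "toy")
(define_cpu_unit "iu_toy" "toy")

;; An insn of type "two" occupies iu_toy on cycle 1 and again on cycle 2,
;; so a following integer insn cannot issue until the unit frees up.
(define_insn_reservation "toy-two" 1
  (eq_attr "type" "two")
  "iu_toy,iu_toy")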
diff --git a/contrib/gcc/config/rs6000/440.md b/contrib/gcc/config/rs6000/440.md
index e98d5be..60e0f72 100644
--- a/contrib/gcc/config/rs6000/440.md
+++ b/contrib/gcc/config/rs6000/440.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM PowerPC 440 processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
@@ -15,8 +15,8 @@
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 59 Temple Place - Suite 330,
-;; Boston, MA 02111-1307, USA.
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
;; PPC440 Embedded PowerPC controller
;; dual issue
@@ -34,11 +34,12 @@
(define_insn_reservation "ppc440-load" 3
- (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
(eq_attr "cpu" "ppc440"))
"ppc440_issue,ppc440_l_pipe")
-(define_insn_reservation "ppc440-store" 1
+(define_insn_reservation "ppc440-store" 3
(and (eq_attr "type" "store,store_ux,store_u")
(eq_attr "cpu" "ppc440"))
"ppc440_issue,ppc440_l_pipe")
@@ -58,6 +59,18 @@
(eq_attr "cpu" "ppc440"))
"ppc440_issue,ppc440_i_pipe|ppc440_j_pipe")
+(define_insn_reservation "ppc440-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,ppc440_i_pipe|ppc440_j_pipe,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
(define_insn_reservation "ppc440-imul" 3
(and (eq_attr "type" "imul,imul_compare")
(eq_attr "cpu" "ppc440"))
@@ -74,7 +87,7 @@
"ppc440_issue,ppc440_i_pipe*33")
(define_insn_reservation "ppc440-branch" 1
- (and (eq_attr "type" "branch,jmpreg")
+ (and (eq_attr "type" "branch,jmpreg,isync")
(eq_attr "cpu" "ppc440"))
"ppc440_issue,ppc440_i_pipe")
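In the 440 reservations above, "+" reserves units simultaneously in the same cycle, while "|" picks whichever alternative unit is free. A sketch with hypothetical unit names: reserving both issue slots at once models an insn that consumes the entire dual-issue bandwidth for one cycle.

(define_automaton "toy_440")
(define_cpu_unit "is0_toy,is1_toy,pa_toy,pb_toy" "toy_440")

;; Cycle 1: both issue slots held together; cycles 2-3: either pipe.
(define_insn_reservation "toy440-two" 1
  (eq_attr "type" "two")
  "is0_toy+is1_toy,pa_toy|pb_toy,pa_toy|pb_toy")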
diff --git a/contrib/gcc/config/rs6000/603.md b/contrib/gcc/config/rs6000/603.md
index 7ae038e..4721aca 100644
--- a/contrib/gcc/config/rs6000/603.md
+++ b/contrib/gcc/config/rs6000/603.md
@@ -1,5 +1,5 @@
;; Scheduling description for PowerPC 603 processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,8 +15,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "ppc603,ppc603fp")
(define_cpu_unit "iu_603" "ppc603")
@@ -39,25 +39,40 @@
;; CR insns get executed in the SRU. Not modelled.
(define_insn_reservation "ppc603-load" 2
- (and (eq_attr "type" "load,load_ext,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ux,load_u,load_l")
(eq_attr "cpu" "ppc603"))
"lsu_603")
-(define_insn_reservation "ppc603-store" 1
+(define_insn_reservation "ppc603-store" 2
(and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
(eq_attr "cpu" "ppc603"))
- "lsu_603")
+ "lsu_603*2")
(define_insn_reservation "ppc603-fpload" 2
(and (eq_attr "type" "fpload,fpload_ux,fpload_u")
(eq_attr "cpu" "ppc603"))
"lsu_603")
+(define_insn_reservation "ppc603-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
(define_insn_reservation "ppc603-integer" 1
(and (eq_attr "type" "integer,insert_word")
(eq_attr "cpu" "ppc603"))
"iu_603")
+(define_insn_reservation "ppc603-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603")
+
+(define_insn_reservation "ppc603-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603,iu_603")
+
; This takes 2 or 3 cycles
(define_insn_reservation "ppc603-imul" 3
(and (eq_attr "type" "imul,imul_compare")
@@ -116,7 +131,7 @@
"sru_603")
(define_insn_reservation "ppc603-mfjmpr" 2
- (and (eq_attr "type" "mfjmpr")
+ (and (eq_attr "type" "mfjmpr,isync,sync")
(eq_attr "cpu" "ppc603"))
"sru_603")
diff --git a/contrib/gcc/config/rs6000/6xx.md b/contrib/gcc/config/rs6000/6xx.md
index d28d373..31aa606 100644
--- a/contrib/gcc/config/rs6000/6xx.md
+++ b/contrib/gcc/config/rs6000/6xx.md
@@ -1,6 +1,6 @@
;; Scheduling description for PowerPC 604, PowerPC 604e, PowerPC 620,
;; and PowerPC 630 processors.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -16,8 +16,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "ppc6xx,ppc6xxfp,ppc6xxfp2")
(define_cpu_unit "iu1_6xx,iu2_6xx,mciu_6xx" "ppc6xx")
@@ -58,16 +58,36 @@
(eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
"lsu_6xx")
-(define_insn_reservation "ppc604-store" 1
+(define_insn_reservation "ppc604-store" 3
(and (eq_attr "type" "store,fpstore,store_ux,store_u,fpstore_ux,fpstore_u")
(eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
"lsu_6xx")
+(define_insn_reservation "ppc604-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-llsc" 4
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
(define_insn_reservation "ppc604-integer" 1
(and (eq_attr "type" "integer,insert_word")
(eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
"iu1_6xx|iu2_6xx")
+(define_insn_reservation "ppc604-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
(define_insn_reservation "ppc604-imul" 4
(and (eq_attr "type" "imul,imul2,imul3,imul_compare")
(eq_attr "cpu" "ppc604"))
@@ -232,3 +252,23 @@
(eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
"bpu_6xx")
+(define_insn_reservation "ppc604-isync" 0
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc630-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-sync" 26
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
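Note that the latency number is independent of how long units stay reserved: "ppc604-sync" above declares a 35-cycle result latency while occupying the load/store unit for only one cycle, so dependent insns are scheduled far later without the unit blocking other work. A sketch in the same hypothetical style:

(define_automaton "toy_6xx")
(define_cpu_unit "lsu6_toy" "toy_6xx")

;; Consumers see a 35-cycle latency; the unit itself frees after one cycle.
(define_insn_reservation "toy6xx-sync" 35
  (eq_attr "type" "sync")
  "lsu6_toy")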
diff --git a/contrib/gcc/config/rs6000/7450.md b/contrib/gcc/config/rs6000/7450.md
index 55bd4d8..99e8712 100644
--- a/contrib/gcc/config/rs6000/7450.md
+++ b/contrib/gcc/config/rs6000/7450.md
@@ -1,5 +1,5 @@
;; Scheduling description for Motorola PowerPC 7450 processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,11 +15,12 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
-(define_automaton "ppc7450,ppc7450fp,ppc7450vec")
-(define_cpu_unit "iu1_7450,iu2_7450,iu3_7450,mciu_7450" "ppc7450")
+(define_automaton "ppc7450,ppc7450mciu,ppc7450fp,ppc7450vec")
+(define_cpu_unit "iu1_7450,iu2_7450,iu3_7450" "ppc7450")
+(define_cpu_unit "mciu_7450" "ppc7450mciu")
(define_cpu_unit "fpu_7450" "ppc7450fp")
(define_cpu_unit "lsu_7450,bpu_7450" "ppc7450")
(define_cpu_unit "du1_7450,du2_7450,du3_7450" "ppc7450")
@@ -63,10 +64,31 @@
(eq_attr "cpu" "ppc7450"))
"ppc7450_du,lsu_7450*3")
+(define_insn_reservation "ppc7450-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
(define_insn_reservation "ppc7450-integer" 1
(and (eq_attr "type" "integer,insert_word")
(eq_attr "cpu" "ppc7450"))
- "ppc7450_du,(iu1_7450|iu2_7450|iu3_7450)")
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,\
+ iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
(define_insn_reservation "ppc7450-imul" 4
(and (eq_attr "type" "imul,imul_compare")
@@ -130,7 +152,7 @@
"nothing,mciu_7450*2")
(define_insn_reservation "ppc7450-jmpreg" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "ppc7450"))
"nothing,bpu_7450")
diff --git a/contrib/gcc/config/rs6000/7xx.md b/contrib/gcc/config/rs6000/7xx.md
index de8a7b7..77e58a3 100644
--- a/contrib/gcc/config/rs6000/7xx.md
+++ b/contrib/gcc/config/rs6000/7xx.md
@@ -1,5 +1,5 @@
;; Scheduling description for Motorola PowerPC 750 and PowerPC 7400 processors.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,8 +15,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "ppc7xx,ppc7xxfp")
(define_cpu_unit "iu1_7xx,iu2_7xx" "ppc7xx")
@@ -48,20 +48,36 @@
(define_insn_reservation "ppc750-load" 2
(and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
- load_ux,load_u,fpload,fpload_ux,fpload_u,vecload")
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ vecload,load_l")
(eq_attr "cpu" "ppc750,ppc7400"))
"ppc750_du,lsu_7xx")
-(define_insn_reservation "ppc750-store" 1
+(define_insn_reservation "ppc750-store" 2
(and (eq_attr "type" "store,store_ux,store_u,\
fpstore,fpstore_ux,fpstore_u,vecstore")
(eq_attr "cpu" "ppc750,ppc7400"))
"ppc750_du,lsu_7xx")
+(define_insn_reservation "ppc750-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
(define_insn_reservation "ppc750-integer" 1
(and (eq_attr "type" "integer,insert_word")
(eq_attr "cpu" "ppc750,ppc7400"))
- "ppc750_du,(iu1_7xx|iu2_7xx)")
+ "ppc750_du,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
(define_insn_reservation "ppc750-imul" 4
(and (eq_attr "type" "imul,imul_compare")
@@ -127,10 +143,10 @@
(define_insn_reservation "ppc750-crlogical" 3
(and (eq_attr "type" "cr_logical,delayed_cr")
(eq_attr "cpu" "ppc750,ppc7400"))
- "ppc750_du,sru_7xx*2")
+ "nothing,sru_7xx*2")
(define_insn_reservation "ppc750-mtjmpr" 2
- (and (eq_attr "type" "mtjmpr")
+ (and (eq_attr "type" "mtjmpr,isync,sync")
(eq_attr "cpu" "ppc750,ppc7400"))
"nothing,sru_7xx*2")
@@ -140,7 +156,7 @@
"nothing,sru_7xx*2")
(define_insn_reservation "ppc750-jmpreg" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "ppc750,ppc7400"))
"nothing,bpu_7xx")
diff --git a/contrib/gcc/config/rs6000/8540.md b/contrib/gcc/config/rs6000/8540.md
index 737c399..b42e247 100644
--- a/contrib/gcc/config/rs6000/8540.md
+++ b/contrib/gcc/config/rs6000/8540.md
@@ -1,5 +1,5 @@
;; Pipeline description for Motorola PowerPC 8540 processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,14 +15,14 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "ppc8540_most,ppc8540_long,ppc8540_retire")
(define_cpu_unit "ppc8540_decode_0,ppc8540_decode_1" "ppc8540_most")
;; We don't simulate general issue queue (GIC). If we have SU insn
-;; and then SU1 insn, they can not be issued on the same cycle
+;; and then SU1 insn, they cannot be issued on the same cycle
;; (although SU1 insn and then SU insn can be issued) because the SU
;; insn will go to SU1 from GIC0 entry. Fortunately, the first cycle
;; multipass insn scheduling will find the situation and issue the SU1
@@ -31,7 +31,7 @@
;; We could describe completion buffers slots in combination with the
;; retirement units and the order of completion but the result
-;; automaton would behave in the same way because we can not describe
+;; automaton would behave in the same way because we cannot describe
;; real latency time with taking in order completion into account.
;; Actually we could define the real latency time by querying reserved
;; automaton units but the current scheduler uses latency time before
@@ -89,9 +89,22 @@
(eq_attr "cpu" "ppc8540"))
"ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+(define_insn_reservation "ppc8540_two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+(define_insn_reservation "ppc8540_three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
;; Branch. Actually this latency time is not used by the scheduler.
(define_insn_reservation "ppc8540_branch" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "ppc8540"))
"ppc8540_decode,ppc8540_bu,ppc8540_retire")
@@ -138,13 +151,14 @@
;; Loads
(define_insn_reservation "ppc8540_load" 3
- (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,sync")
(eq_attr "cpu" "ppc8540"))
"ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
;; Stores.
(define_insn_reservation "ppc8540_store" 3
- (and (eq_attr "type" "store,store_ux,store_u")
+ (and (eq_attr "type" "store,store_ux,store_u,store_c")
(eq_attr "cpu" "ppc8540"))
"ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
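The "nothing" element in the 8540 load/store reservations consumes a cycle without reserving any unit, modeling a pipeline stage the scheduler does not need to check for conflicts. A sketch with hypothetical units:

(define_automaton "toy_8540")
(define_cpu_unit "dec_toy,lsu8_toy,ret_toy" "toy_8540")

;; Cycle 1: decode; cycle 2: LSU; cycle 3: no tracked unit; cycle 4: retire.
(define_insn_reservation "toy8540-load" 3
  (eq_attr "type" "load")
  "dec_toy,lsu8_toy,nothing,ret_toy")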
diff --git a/contrib/gcc/config/rs6000/aix.h b/contrib/gcc/config/rs6000/aix.h
index b14107f..57f4876 100644
--- a/contrib/gcc/config/rs6000/aix.h
+++ b/contrib/gcc/config/rs6000/aix.h
@@ -1,6 +1,7 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
This file is part of GCC.
@@ -16,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Yes! We are AIX! */
#define DEFAULT_ABI ABI_AIX
@@ -32,13 +33,17 @@
/* AIX allows r13 to be used in 32-bit mode. */
#define FIXED_R13 0
+/* 32-bit and 64-bit AIX stack boundary is 128. */
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY 128
+
/* AIX does not support Altivec. */
#undef TARGET_ALTIVEC
#define TARGET_ALTIVEC 0
#undef TARGET_ALTIVEC_ABI
#define TARGET_ALTIVEC_ABI 0
-#undef TARGET_ALTIVEC_VRSAVE
-#define TARGET_ALTIVEC_VRSAVE 0
+#undef TARGET_IEEEQUAD
+#define TARGET_IEEEQUAD 0
/* The AIX linker will discard static constructors in object files before
collect has a chance to see them, so scan the object files directly. */
@@ -51,26 +56,52 @@
#define REAL_NM_FILE_NAME "/usr/ucb/nm"
#define USER_LABEL_PREFIX ""
+
/* Don't turn -B into -L if the argument specifies a relative file name. */
#define RELATIVE_PREFIX_NOT_LINKDIR
/* Because of the above, we must have gcc search itself to find libgcc.a. */
#define LINK_LIBGCC_SPECIAL_1
+#define MFWRAP_SPEC " %{static: %{fmudflap|fmudflapth: \
+ -brename:malloc,__wrap_malloc -brename:__real_malloc,malloc \
+ -brename:free,__wrap_free -brename:__real_free,free \
+ -brename:calloc,__wrap_calloc -brename:__real_calloc,calloc \
+ -brename:realloc,__wrap_realloc -brename:__real_realloc,realloc \
+ -brename:mmap,__wrap_mmap -brename:__real_mmap,mmap \
+ -brename:munmap,__wrap_munmap -brename:__real_munmap,munmap \
+ -brename:alloca,__wrap_alloca -brename:__real_alloca,alloca \
+} %{fmudflapth: \
+ -brename:pthread_create,__wrap_pthread_create \
+ -brename:__real_pthread_create,pthread_create \
+ -brename:pthread_join,__wrap_pthread_join \
+ -brename:__real_pthread_join,pthread_join \
+ -brename:pthread_exit,__wrap_pthread_exit \
+ -brename:__real_pthread_exit,pthread_exit \
+}} %{fmudflap|fmudflapth: \
+ -brename:main,__wrap_main -brename:__real_main,main \
+}"
+
+#define MFLIB_SPEC " %{fmudflap: -lmudflap \
+ %{static:%(link_gcc_c_sequence) -lmudflap}} \
+ %{fmudflapth: -lmudflapth -lpthread \
+ %{static:%(link_gcc_c_sequence) -lmudflapth}} "
+
/* Names to predefine in the preprocessor for this target machine. */
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("_IBMR2"); \
- builtin_define ("_POWER"); \
- builtin_define ("_AIX"); \
- builtin_define ("_AIX32"); \
- builtin_define ("_LONG_LONG"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=aix"); \
- builtin_assert ("cpu=rs6000"); \
- builtin_assert ("machine=rs6000"); \
- } \
+#define TARGET_OS_AIX_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_IBMR2"); \
+ builtin_define ("_POWER"); \
+ builtin_define ("_AIX"); \
+ builtin_define ("_AIX32"); \
+ builtin_define ("_AIX41"); \
+ builtin_define ("_LONG_LONG"); \
+ if (TARGET_LONG_DOUBLE_128) \
+ builtin_define ("__LONGDOUBLE128"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=aix"); \
+ } \
while (0)
/* Define appropriate architecture macros for preprocessor depending on
@@ -139,12 +170,12 @@
/* AIX increases natural record alignment to doubleword if the first
field is an FP double while the FP fields remain word aligned. */
-#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
- ((TREE_CODE (STRUCT) == RECORD_TYPE \
- || TREE_CODE (STRUCT) == UNION_TYPE \
- || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
- && TARGET_ALIGN_NATURAL == 0 \
- ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0 \
+ ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
: MAX ((COMPUTED), (SPECIFIED)))
/* The AIX ABI isn't explicit on whether aggregates smaller than a
@@ -157,13 +188,6 @@
#define AGGREGATE_PADDING_FIXED 1
#define AGGREGATES_PAD_UPWARD_ALWAYS 1
-/* We don't want anything in the reg parm area being passed on the
- stack. */
-#define MUST_PASS_IN_STACK(MODE, TYPE) \
- ((TYPE) != 0 \
- && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
- || TREE_ADDRESSABLE (TYPE)))
-
/* Specify padding for the last element of a block move between
registers and memory. FIRST is nonzero if this is the only
element. */
@@ -174,19 +198,6 @@
#define JUMP_TABLES_IN_TEXT_SECTION 1
-/* Enable AIX XL compiler calling convention breakage compatibility. */
-#undef TARGET_XL_COMPAT
-#define MASK_XL_COMPAT 0x40000000
-#define TARGET_XL_COMPAT (target_flags & MASK_XL_COMPAT)
-#undef SUBTARGET_SWITCHES
-#define SUBTARGET_SWITCHES \
- {"xl-compat", MASK_XL_COMPAT, \
- N_("Conform more closely to IBM XLC semantics") }, \
- {"no-xl-compat", - MASK_XL_COMPAT, \
- N_("Default GCC semantics that differ from IBM XLC") }, \
- SUBSUBTARGET_SWITCHES
-#define SUBSUBTARGET_SWITCHES
-
/* Define any extra SPECS that the compiler needs to generate. */
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
@@ -244,3 +255,6 @@
32-bit mode. */
#define OS_MISSING_POWERPC64 1
#define OS_MISSING_ALTIVEC 1
+
+/* WINT_TYPE */
+#define WINT_TYPE "int"
diff --git a/contrib/gcc/config/rs6000/aix.opt b/contrib/gcc/config/rs6000/aix.opt
new file mode 100644
index 0000000..e6dadce
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix.opt
@@ -0,0 +1,25 @@
+; AIX options.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mxl-compat
+Target Var(has_xl_compat_option)
+Conform more closely to IBM XLC semantics
diff --git a/contrib/gcc/config/rs6000/aix41.h b/contrib/gcc/config/rs6000/aix41.h
index 542f928..44b1390 100644
--- a/contrib/gcc/config/rs6000/aix41.h
+++ b/contrib/gcc/config/rs6000/aix41.h
@@ -1,6 +1,7 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX version 4.1.
- Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003
+ Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004,
+ 2005
Free Software Foundation, Inc.
Contributed by David Edelsohn (edelsohn@gnu.org).
@@ -18,13 +19,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-#undef SUBSUBTARGET_SWITCHES
-#define SUBSUBTARGET_SWITCHES \
- {"pe", 0, \
- N_("Support message passing with the Parallel Environment") },
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef ASM_SPEC
#define ASM_SPEC "-u %(asm_cpu)"
@@ -33,18 +29,11 @@
#define ASM_DEFAULT_SPEC "-mcom"
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("_IBMR2"); \
- builtin_define ("_POWER"); \
- builtin_define ("_AIX"); \
- builtin_define ("_AIX32"); \
- builtin_define ("_AIX41"); \
- builtin_define ("_LONG_LONG"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=aix"); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
while (0)
#undef CPP_SPEC
@@ -102,3 +91,6 @@
.set directives. We handle this by deferring the output of .set
directives to the end of the compilation unit. */
#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
+
+#undef TARGET_64BIT
+#define TARGET_64BIT 0
diff --git a/contrib/gcc/config/rs6000/aix41.opt b/contrib/gcc/config/rs6000/aix41.opt
new file mode 100644
index 0000000..a1f7665
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix41.opt
@@ -0,0 +1,25 @@
+; Options for AIX4.1.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mpe
+Target Report RejectNegative Var(internal_nothing_1)
+Support message passing with the Parallel Environment
diff --git a/contrib/gcc/config/rs6000/aix43.h b/contrib/gcc/config/rs6000/aix43.h
index 50bd304..b99f1a8 100644
--- a/contrib/gcc/config/rs6000/aix43.h
+++ b/contrib/gcc/config/rs6000/aix43.h
@@ -1,6 +1,7 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX version 4.3.
- Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by David Edelsohn (edelsohn@gnu.org).
This file is part of GCC.
@@ -17,18 +18,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-/* AIX 4.3 and above support 64-bit executables. */
-#undef SUBSUBTARGET_SWITCHES
-#define SUBSUBTARGET_SWITCHES \
- {"aix64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
- N_("Compile for 64-bit pointers") }, \
- {"aix32", - (MASK_64BIT | MASK_POWERPC64), \
- N_("Compile for 32-bit pointers") }, \
- {"pe", 0, \
- N_("Support message passing with the Parallel Environment") },
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
@@ -45,12 +36,18 @@ do { \
if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
{ \
target_flags &= ~NON_POWERPC_MASKS; \
- warning ("-maix64 and POWER architecture are incompatible"); \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
} \
if (TARGET_64BIT && ! TARGET_POWERPC64) \
{ \
target_flags |= MASK_POWERPC64; \
- warning ("-maix64 requires PowerPC64 architecture remain enabled"); \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
} \
if (TARGET_POWERPC64 && ! TARGET_64BIT) \
{ \
@@ -96,19 +93,12 @@ do { \
#define ASM_DEFAULT_SPEC "-mcom"
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("_IBMR2"); \
- builtin_define ("_POWER"); \
- builtin_define ("_AIX"); \
- builtin_define ("_AIX32"); \
- builtin_define ("_AIX41"); \
- builtin_define ("_AIX43"); \
- builtin_define ("_LONG_LONG"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=aix"); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
while (0)
#undef CPP_SPEC
@@ -122,10 +112,7 @@ do { \
defined. */
#undef CPLUSPLUS_CPP_SPEC
#define CPLUSPLUS_CPP_SPEC \
- "-D_XOPEN_SOURCE=500 \
- -D_XOPEN_SOURCE_EXTENDED=1 \
- -D_LARGE_FILE_API \
- -D_ALL_SOURCE \
+ "-D_ALL_SOURCE \
%{maix64: -D__64BIT__} \
%{mpe: -I/usr/lpp/ppe.poe/include} \
%{pthread: -D_THREAD_SAFE}"
@@ -192,3 +179,6 @@ do { \
.set directives. We handle this by deferring the output of .set
directives to the end of the compilation unit. */
#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
diff --git a/contrib/gcc/config/rs6000/aix51.h b/contrib/gcc/config/rs6000/aix51.h
index 863b97f..ad0afee 100644
--- a/contrib/gcc/config/rs6000/aix51.h
+++ b/contrib/gcc/config/rs6000/aix51.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX V5.
- Copyright (C) 2001, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by David Edelsohn (edelsohn@gnu.org).
This file is part of GCC.
@@ -17,18 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-/* AIX V5 and above support 64-bit executables. */
-#undef SUBSUBTARGET_SWITCHES
-#define SUBSUBTARGET_SWITCHES \
- {"aix64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
- N_("Compile for 64-bit pointers") }, \
- {"aix32", - (MASK_64BIT | MASK_POWERPC64), \
- N_("Compile for 32-bit pointers") }, \
- {"pe", 0, \
- N_("Support message passing with the Parallel Environment") },
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
@@ -45,12 +35,12 @@ do { \
if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
{ \
target_flags &= ~NON_POWERPC_MASKS; \
- warning ("-maix64 and POWER architecture are incompatible"); \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
} \
if (TARGET_64BIT && ! TARGET_POWERPC64) \
{ \
target_flags |= MASK_POWERPC64; \
- warning ("-maix64 requires PowerPC64 architecture remain enabled"); \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
} \
if (TARGET_POWERPC64 && ! TARGET_64BIT) \
{ \
@@ -90,26 +80,21 @@ do { \
%{mcpu=604: -m604} \
%{mcpu=604e: -m604} \
%{mcpu=620: -m620} \
-%{mcpu=630: -m620}"
+%{mcpu=630: -m620} \
+%{mcpu=970: -m620} \
+%{mcpu=G5: -m620}"
#undef ASM_DEFAULT_SPEC
#define ASM_DEFAULT_SPEC "-mcom"
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("_IBMR2"); \
- builtin_define ("_POWER"); \
- builtin_define ("_LONG_LONG"); \
- builtin_define ("_AIX"); \
- builtin_define ("_AIX32"); \
- builtin_define ("_AIX41"); \
- builtin_define ("_AIX43"); \
- builtin_define ("_AIX51"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=aix"); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
while (0)
#undef CPP_SPEC
@@ -123,10 +108,7 @@ do { \
defined. */
#undef CPLUSPLUS_CPP_SPEC
#define CPLUSPLUS_CPP_SPEC \
- "-D_XOPEN_SOURCE=500 \
- -D_XOPEN_SOURCE_EXTENDED=1 \
- -D_LARGE_FILE_API \
- -D_ALL_SOURCE \
+ "-D_ALL_SOURCE \
%{maix64: -D__64BIT__} \
%{mpe: -I/usr/lpp/ppe.poe/include} \
%{pthread: -D_THREAD_SAFE}"
@@ -181,7 +163,6 @@ do { \
/* Width of wchar_t in bits. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-#define MAX_WCHAR_TYPE_SIZE 32
/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
and "cror 31,31,31" for POWER architecture. */
@@ -197,3 +178,5 @@ do { \
#undef LD_INIT_SWITCH
#define LD_INIT_SWITCH "-binitfini"
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
diff --git a/contrib/gcc/config/rs6000/aix52.h b/contrib/gcc/config/rs6000/aix52.h
index 6f12619..87d2157 100644
--- a/contrib/gcc/config/rs6000/aix52.h
+++ b/contrib/gcc/config/rs6000/aix52.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX V5.2.
- Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by David Edelsohn (edelsohn@gnu.org).
This file is part of GCC.
@@ -17,18 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-/* AIX V5 and above support 64-bit executables. */
-#undef SUBSUBTARGET_SWITCHES
-#define SUBSUBTARGET_SWITCHES \
- {"aix64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
- N_("Compile for 64-bit pointers") }, \
- {"aix32", - (MASK_64BIT | MASK_POWERPC64), \
- N_("Compile for 32-bit pointers") }, \
- {"pe", 0, \
- N_("Support message passing with the Parallel Environment") },
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
@@ -45,12 +35,18 @@ do { \
if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
{ \
target_flags &= ~NON_POWERPC_MASKS; \
- warning ("-maix64 and POWER architecture are incompatible"); \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
} \
if (TARGET_64BIT && ! TARGET_POWERPC64) \
{ \
target_flags |= MASK_POWERPC64; \
- warning ("-maix64 requires PowerPC64 architecture remain enabled"); \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
} \
if (TARGET_POWERPC64 && ! TARGET_64BIT) \
{ \
@@ -70,6 +66,9 @@ do { \
%{!mpower64: %(asm_default)}}} \
%{mcpu=power3: -m620} \
%{mcpu=power4: -m620} \
+%{mcpu=power5: -m620} \
+%{mcpu=power5+: -m620} \
+%{mcpu=power6: -m620} \
%{mcpu=powerpc: -mppc} \
%{mcpu=rs64a: -mppc} \
%{mcpu=603: -m603} \
@@ -77,27 +76,22 @@ do { \
%{mcpu=604: -m604} \
%{mcpu=604e: -m604} \
%{mcpu=620: -m620} \
-%{mcpu=630: -m620}"
+%{mcpu=630: -m620} \
+%{mcpu=970: -m620} \
+%{mcpu=G5: -m620}"
#undef ASM_DEFAULT_SPEC
#define ASM_DEFAULT_SPEC "-mppc"
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("_IBMR2"); \
- builtin_define ("_POWER"); \
- builtin_define ("_LONG_LONG"); \
- builtin_define ("_AIX"); \
- builtin_define ("_AIX32"); \
- builtin_define ("_AIX41"); \
- builtin_define ("_AIX43"); \
- builtin_define ("_AIX51"); \
- builtin_define ("_AIX52"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=aix"); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ builtin_define ("_AIX52"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
while (0)
#undef CPP_SPEC
@@ -108,13 +102,10 @@ do { \
%{pthread: -D_THREAD_SAFE}"
/* The GNU C++ standard library requires that these macros be
- defined. */
+ defined. Synchronize with libstdc++ os_defines.h. */
#undef CPLUSPLUS_CPP_SPEC
#define CPLUSPLUS_CPP_SPEC \
- "-D_XOPEN_SOURCE=500 \
- -D_XOPEN_SOURCE_EXTENDED=1 \
- -D_LARGE_FILE_API \
- -D_ALL_SOURCE \
+ "-D_ALL_SOURCE \
%{maix64: -D__64BIT__} \
%{mpe: -I/usr/lpp/ppe.poe/include} \
%{pthread: -D_THREAD_SAFE}"
@@ -123,7 +114,7 @@ do { \
#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
#undef PROCESSOR_DEFAULT
-#define PROCESSOR_DEFAULT PROCESSOR_PPC630
+#define PROCESSOR_DEFAULT PROCESSOR_POWER4
#undef PROCESSOR_DEFAULT64
#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
@@ -173,7 +164,6 @@ do { \
/* Width of wchar_t in bits. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-#define MAX_WCHAR_TYPE_SIZE 32
/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
and "cror 31,31,31" for POWER architecture. */
@@ -197,3 +187,5 @@ do { \
extern long long int atoll(const char *);
#endif
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
diff --git a/contrib/gcc/config/rs6000/aix64.opt b/contrib/gcc/config/rs6000/aix64.opt
new file mode 100644
index 0000000..817e985
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix64.opt
@@ -0,0 +1,33 @@
+; Options for the 64-bit flavor of AIX.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+maix64
+Target Report RejectNegative Mask(64BIT)
+Compile for 64-bit pointers
+
+maix32
+Target Report RejectNegative InverseMask(64BIT)
+Compile for 32-bit pointers
+
+mpe
+Target Report RejectNegative Var(internal_nothing_1)
+Support message passing with the Parallel Environment
diff --git a/contrib/gcc/config/rs6000/altivec.h b/contrib/gcc/config/rs6000/altivec.h
index 779b428..dacb6d0 100644
--- a/contrib/gcc/config/rs6000/altivec.h
+++ b/contrib/gcc/config/rs6000/altivec.h
@@ -1,6 +1,7 @@
/* PowerPC AltiVec include file.
- Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Aldy Hernandez (aldyh@redhat.com).
+ Rewritten by Paolo Bonzini (bonzini@gnu.org).
This file is part of GCC.
@@ -16,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* As a special exception, if you include this header file into source
files compiled by GCC, this header file does not by itself cause
@@ -55,8 +56,7 @@
#define __CR6_LT 2
#define __CR6_LT_REV 3
-/* These are easy... Same exact arguments. */
-
+/* Synonyms. */
#define vec_vaddcuw vec_addc
#define vec_vand vec_and
#define vec_vandc vec_andc
@@ -99,11409 +99,363 @@
#define vec_vrfiz vec_trunc
#define vec_vxor vec_xor
-#ifdef __cplusplus
-
-extern "C++" {
-
-/* Prototypes for builtins that take literals and must always be
- inlined. */
-inline __vector float vec_ctf (__vector unsigned int, const int) __attribute__ ((always_inline));
-inline __vector float vec_ctf (__vector signed int, const int) __attribute__ ((always_inline));
-inline __vector float vec_vcfsx (__vector signed int a1, const int a2) __attribute__ ((always_inline));
-inline __vector float vec_vcfux (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
-inline __vector signed int vec_cts (__vector float, const int) __attribute__ ((always_inline));
-inline __vector unsigned int vec_ctu (__vector float, const int) __attribute__ ((always_inline));
-inline void vec_dss (const int) __attribute__ ((always_inline));
-
-inline void vec_dst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const __vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const unsigned long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const float *, int, const int) __attribute__ ((always_inline));
-
-inline void vec_dstst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const __vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const unsigned long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const float *, int, const int) __attribute__ ((always_inline));
-
-inline void vec_dststt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const __vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const unsigned long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const float *, int, const int) __attribute__ ((always_inline));
-
-inline void vec_dstt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const __vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const unsigned char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const signed char *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const unsigned short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const short *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const unsigned int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const int *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const unsigned long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const long *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const float *, int, const int) __attribute__ ((always_inline));
-
-inline __vector float vec_sld (__vector float, __vector float, const int) __attribute__ ((always_inline));
-inline __vector signed int vec_sld (__vector signed int, __vector signed int, const int) __attribute__ ((always_inline));
-inline __vector unsigned int vec_sld (__vector unsigned int, __vector unsigned int, const int) __attribute__ ((always_inline));
-inline __vector __bool int vec_sld (__vector __bool int, __vector __bool int, const int) __attribute__ ((always_inline));
-inline __vector signed short vec_sld (__vector signed short, __vector signed short, const int) __attribute__ ((always_inline));
-inline __vector unsigned short vec_sld (__vector unsigned short, __vector unsigned short, const int) __attribute__ ((always_inline));
-inline __vector __bool short vec_sld (__vector __bool short, __vector __bool short, const int) __attribute__ ((always_inline));
-inline __vector __pixel vec_sld (__vector __pixel, __vector __pixel, const int) __attribute__ ((always_inline));
-inline __vector signed char vec_sld (__vector signed char, __vector signed char, const int) __attribute__ ((always_inline));
-inline __vector unsigned char vec_sld (__vector unsigned char, __vector unsigned char, const int) __attribute__ ((always_inline));
-inline __vector __bool char vec_sld (__vector __bool char, __vector __bool char, const int) __attribute__ ((always_inline));
-inline __vector signed char vec_splat (__vector signed char, const int) __attribute__ ((always_inline));
-inline __vector unsigned char vec_splat (__vector unsigned char, const int) __attribute__ ((always_inline));
-inline __vector __bool char vec_splat (__vector __bool char, const int) __attribute__ ((always_inline));
-inline __vector signed short vec_splat (__vector signed short, const int) __attribute__ ((always_inline));
-inline __vector unsigned short vec_splat (__vector unsigned short, const int) __attribute__ ((always_inline));
-inline __vector __bool short vec_splat (__vector __bool short, const int) __attribute__ ((always_inline));
-inline __vector __pixel vec_splat (__vector __pixel, const int) __attribute__ ((always_inline));
-inline __vector float vec_splat (__vector float, const int) __attribute__ ((always_inline));
-inline __vector signed int vec_splat (__vector signed int, const int) __attribute__ ((always_inline));
-inline __vector unsigned int vec_splat (__vector unsigned int, const int) __attribute__ ((always_inline));
-inline __vector __bool int vec_splat (__vector __bool int, const int) __attribute__ ((always_inline));
-inline __vector signed char vec_splat_s8 (const int) __attribute__ ((always_inline));
-inline __vector signed short vec_splat_s16 (const int) __attribute__ ((always_inline));
-inline __vector signed int vec_splat_s32 (const int) __attribute__ ((always_inline));
-inline __vector unsigned char vec_splat_u8 (const int) __attribute__ ((always_inline));
-inline __vector unsigned short vec_splat_u16 (const int) __attribute__ ((always_inline));
-inline __vector unsigned int vec_splat_u32 (const int) __attribute__ ((always_inline));
-inline __vector float vec_vspltw (__vector float a1, const int a2) __attribute__ ((always_inline));
-inline __vector __bool int vec_vspltw (__vector __bool int a1, const int a2) __attribute__ ((always_inline));
-inline __vector signed int vec_vspltw (__vector signed int a1, const int a2) __attribute__ ((always_inline));
-inline __vector unsigned int vec_vspltw (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
-inline __vector __bool short vec_vsplth (__vector __bool short a1, const int a2) __attribute__ ((always_inline));
-inline __vector signed short vec_vsplth (__vector signed short a1, const int a2) __attribute__ ((always_inline));
-inline __vector unsigned short vec_vsplth (__vector unsigned short a1, const int a2) __attribute__ ((always_inline));
-inline __vector __pixel vec_vsplth (__vector __pixel a1, const int a2) __attribute__ ((always_inline));
-inline __vector __bool char vec_vspltb (__vector __bool char a1, const int a2) __attribute__ ((always_inline));
-inline __vector signed char vec_vspltb (__vector signed char a1, const int a2) __attribute__ ((always_inline));
-inline __vector unsigned char vec_vspltb (__vector unsigned char a1, const int a2) __attribute__ ((always_inline));
-
-/* vec_step */
-
-template<typename _Tp>
-struct __vec_step_help
-{
- // All proper __vector types will specialize _S_elem.
-};
-
-template<>
-struct __vec_step_help<__vector signed short>
-{
- static const int _S_elem = 8;
+/* Functions that are resolved by the back end to one of the typed
+   builtins; an illustrative expansion follows this list.  */
+#define vec_vaddfp __builtin_vec_vaddfp
+#define vec_addc __builtin_vec_addc
+#define vec_vaddsws __builtin_vec_vaddsws
+#define vec_vaddshs __builtin_vec_vaddshs
+#define vec_vaddsbs __builtin_vec_vaddsbs
+#define vec_vavgsw __builtin_vec_vavgsw
+#define vec_vavguw __builtin_vec_vavguw
+#define vec_vavgsh __builtin_vec_vavgsh
+#define vec_vavguh __builtin_vec_vavguh
+#define vec_vavgsb __builtin_vec_vavgsb
+#define vec_vavgub __builtin_vec_vavgub
+#define vec_ceil __builtin_vec_ceil
+#define vec_cmpb __builtin_vec_cmpb
+#define vec_vcmpeqfp __builtin_vec_vcmpeqfp
+#define vec_cmpge __builtin_vec_cmpge
+#define vec_vcmpgtfp __builtin_vec_vcmpgtfp
+#define vec_vcmpgtsw __builtin_vec_vcmpgtsw
+#define vec_vcmpgtuw __builtin_vec_vcmpgtuw
+#define vec_vcmpgtsh __builtin_vec_vcmpgtsh
+#define vec_vcmpgtuh __builtin_vec_vcmpgtuh
+#define vec_vcmpgtsb __builtin_vec_vcmpgtsb
+#define vec_vcmpgtub __builtin_vec_vcmpgtub
+#define vec_vcfsx __builtin_vec_vcfsx
+#define vec_vcfux __builtin_vec_vcfux
+#define vec_cts __builtin_vec_cts
+#define vec_ctu __builtin_vec_ctu
+#define vec_expte __builtin_vec_expte
+#define vec_floor __builtin_vec_floor
+#define vec_loge __builtin_vec_loge
+#define vec_madd __builtin_vec_madd
+#define vec_madds __builtin_vec_madds
+#define vec_mtvscr __builtin_vec_mtvscr
+#define vec_vmaxfp __builtin_vec_vmaxfp
+#define vec_vmaxsw __builtin_vec_vmaxsw
+#define vec_vmaxsh __builtin_vec_vmaxsh
+#define vec_vmaxsb __builtin_vec_vmaxsb
+#define vec_vminfp __builtin_vec_vminfp
+#define vec_vminsw __builtin_vec_vminsw
+#define vec_vminsh __builtin_vec_vminsh
+#define vec_vminsb __builtin_vec_vminsb
+#define vec_mradds __builtin_vec_mradds
+#define vec_vmsumshm __builtin_vec_vmsumshm
+#define vec_vmsumuhm __builtin_vec_vmsumuhm
+#define vec_vmsummbm __builtin_vec_vmsummbm
+#define vec_vmsumubm __builtin_vec_vmsumubm
+#define vec_vmsumshs __builtin_vec_vmsumshs
+#define vec_vmsumuhs __builtin_vec_vmsumuhs
+#define vec_vmulesb __builtin_vec_vmulesb
+#define vec_vmulesh __builtin_vec_vmulesh
+#define vec_vmuleuh __builtin_vec_vmuleuh
+#define vec_vmuleub __builtin_vec_vmuleub
+#define vec_vmulosh __builtin_vec_vmulosh
+#define vec_vmulouh __builtin_vec_vmulouh
+#define vec_vmulosb __builtin_vec_vmulosb
+#define vec_vmuloub __builtin_vec_vmuloub
+#define vec_nmsub __builtin_vec_nmsub
+#define vec_packpx __builtin_vec_packpx
+#define vec_vpkswss __builtin_vec_vpkswss
+#define vec_vpkuwus __builtin_vec_vpkuwus
+#define vec_vpkshss __builtin_vec_vpkshss
+#define vec_vpkuhus __builtin_vec_vpkuhus
+#define vec_vpkswus __builtin_vec_vpkswus
+#define vec_vpkshus __builtin_vec_vpkshus
+#define vec_re __builtin_vec_re
+#define vec_round __builtin_vec_round
+#define vec_rsqrte __builtin_vec_rsqrte
+#define vec_vsubfp __builtin_vec_vsubfp
+#define vec_subc __builtin_vec_subc
+#define vec_vsubsws __builtin_vec_vsubsws
+#define vec_vsubshs __builtin_vec_vsubshs
+#define vec_vsubsbs __builtin_vec_vsubsbs
+#define vec_sum4s __builtin_vec_sum4s
+#define vec_vsum4shs __builtin_vec_vsum4shs
+#define vec_vsum4sbs __builtin_vec_vsum4sbs
+#define vec_vsum4ubs __builtin_vec_vsum4ubs
+#define vec_sum2s __builtin_vec_sum2s
+#define vec_sums __builtin_vec_sums
+#define vec_trunc __builtin_vec_trunc
+#define vec_vupkhpx __builtin_vec_vupkhpx
+#define vec_vupkhsh __builtin_vec_vupkhsh
+#define vec_vupkhsb __builtin_vec_vupkhsb
+#define vec_vupklpx __builtin_vec_vupklpx
+#define vec_vupklsh __builtin_vec_vupklsh
+#define vec_vupklsb __builtin_vec_vupklsb
+#define vec_abs __builtin_vec_abs
+#define vec_abss __builtin_vec_abss
+#define vec_add __builtin_vec_add
+#define vec_adds __builtin_vec_adds
+#define vec_and __builtin_vec_and
+#define vec_andc __builtin_vec_andc
+#define vec_avg __builtin_vec_avg
+#define vec_cmpeq __builtin_vec_cmpeq
+#define vec_cmpgt __builtin_vec_cmpgt
+#define vec_ctf __builtin_vec_ctf
+#define vec_dst __builtin_vec_dst
+#define vec_dstst __builtin_vec_dstst
+#define vec_dststt __builtin_vec_dststt
+#define vec_dstt __builtin_vec_dstt
+#define vec_ld __builtin_vec_ld
+#define vec_lde __builtin_vec_lde
+#define vec_ldl __builtin_vec_ldl
+#define vec_lvebx __builtin_vec_lvebx
+#define vec_lvehx __builtin_vec_lvehx
+#define vec_lvewx __builtin_vec_lvewx
+#define vec_lvsl __builtin_vec_lvsl
+#define vec_lvsr __builtin_vec_lvsr
+#define vec_max __builtin_vec_max
+#define vec_mergeh __builtin_vec_mergeh
+#define vec_mergel __builtin_vec_mergel
+#define vec_min __builtin_vec_min
+#define vec_mladd __builtin_vec_mladd
+#define vec_msum __builtin_vec_msum
+#define vec_msums __builtin_vec_msums
+#define vec_mule __builtin_vec_mule
+#define vec_mulo __builtin_vec_mulo
+#define vec_nor __builtin_vec_nor
+#define vec_or __builtin_vec_or
+#define vec_pack __builtin_vec_pack
+#define vec_packs __builtin_vec_packs
+#define vec_packsu __builtin_vec_packsu
+#define vec_perm __builtin_vec_perm
+#define vec_rl __builtin_vec_rl
+#define vec_sel __builtin_vec_sel
+#define vec_sl __builtin_vec_sl
+#define vec_sld __builtin_vec_sld
+#define vec_sll __builtin_vec_sll
+#define vec_slo __builtin_vec_slo
+#define vec_splat __builtin_vec_splat
+#define vec_sr __builtin_vec_sr
+#define vec_sra __builtin_vec_sra
+#define vec_srl __builtin_vec_srl
+#define vec_sro __builtin_vec_sro
+#define vec_st __builtin_vec_st
+#define vec_ste __builtin_vec_ste
+#define vec_stl __builtin_vec_stl
+#define vec_stvebx __builtin_vec_stvebx
+#define vec_stvehx __builtin_vec_stvehx
+#define vec_stvewx __builtin_vec_stvewx
+#define vec_sub __builtin_vec_sub
+#define vec_subs __builtin_vec_subs
+#define vec_sum __builtin_vec_sum
+#define vec_unpackh __builtin_vec_unpackh
+#define vec_unpackl __builtin_vec_unpackl
+#define vec_vaddubm __builtin_vec_vaddubm
+#define vec_vaddubs __builtin_vec_vaddubs
+#define vec_vadduhm __builtin_vec_vadduhm
+#define vec_vadduhs __builtin_vec_vadduhs
+#define vec_vadduwm __builtin_vec_vadduwm
+#define vec_vadduws __builtin_vec_vadduws
+#define vec_vcmpequb __builtin_vec_vcmpequb
+#define vec_vcmpequh __builtin_vec_vcmpequh
+#define vec_vcmpequw __builtin_vec_vcmpequw
+#define vec_vmaxub __builtin_vec_vmaxub
+#define vec_vmaxuh __builtin_vec_vmaxuh
+#define vec_vmaxuw __builtin_vec_vmaxuw
+#define vec_vminub __builtin_vec_vminub
+#define vec_vminuh __builtin_vec_vminuh
+#define vec_vminuw __builtin_vec_vminuw
+#define vec_vmrghb __builtin_vec_vmrghb
+#define vec_vmrghh __builtin_vec_vmrghh
+#define vec_vmrghw __builtin_vec_vmrghw
+#define vec_vmrglb __builtin_vec_vmrglb
+#define vec_vmrglh __builtin_vec_vmrglh
+#define vec_vmrglw __builtin_vec_vmrglw
+#define vec_vpkuhum __builtin_vec_vpkuhum
+#define vec_vpkuwum __builtin_vec_vpkuwum
+#define vec_vrlb __builtin_vec_vrlb
+#define vec_vrlh __builtin_vec_vrlh
+#define vec_vrlw __builtin_vec_vrlw
+#define vec_vslb __builtin_vec_vslb
+#define vec_vslh __builtin_vec_vslh
+#define vec_vslw __builtin_vec_vslw
+#define vec_vspltb __builtin_vec_vspltb
+#define vec_vsplth __builtin_vec_vsplth
+#define vec_vspltw __builtin_vec_vspltw
+#define vec_vsrab __builtin_vec_vsrab
+#define vec_vsrah __builtin_vec_vsrah
+#define vec_vsraw __builtin_vec_vsraw
+#define vec_vsrb __builtin_vec_vsrb
+#define vec_vsrh __builtin_vec_vsrh
+#define vec_vsrw __builtin_vec_vsrw
+#define vec_vsububs __builtin_vec_vsububs
+#define vec_vsububm __builtin_vec_vsububm
+#define vec_vsubuhm __builtin_vec_vsubuhm
+#define vec_vsubuhs __builtin_vec_vsubuhs
+#define vec_vsubuwm __builtin_vec_vsubuwm
+#define vec_vsubuws __builtin_vec_vsubuws
+#define vec_xor __builtin_vec_xor
+
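+/* For example (illustrative only): with the mapping above,
+
+     __vector signed int va, vb, vc;
+     vc = vec_add (va, vb);
+
+   becomes __builtin_vec_add (va, vb), which the back end resolves
+   from the operand types to the typed builtin (here the modular
+   word add, vadduwm), replacing the per-type inline overloads.  */
+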
+/* Predicates.
+   For C++, we use templates in order to allow non-parenthesized arguments.
+   For C, instead, we use macros, since non-parenthesized arguments were
+   not allowed even in older GCC implementations of AltiVec.
+
+   In the future, we may add more magic to the back end, so that no
+   one- or two-argument macros are needed.  */
+
+#ifdef __cplusplus
+#define __altivec_unary_pred(NAME, CALL) \
+template <class T> int NAME (T a1) { return CALL; }
+
+#define __altivec_scalar_pred(NAME, CALL) \
+template <class T, class U> int NAME (T a1, U a2) { return CALL; }
+
+/* Given the vec_step of a type, return the corresponding bool type. */
+template <int STEP> class __altivec_bool_ret { };
+template <> class __altivec_bool_ret <4> {
+ typedef __vector __bool int __ret;
};
-
-template<>
-struct __vec_step_help<__vector unsigned short>
-{
- static const int _S_elem = 8;
-};
-
-template<>
-struct __vec_step_help<__vector __bool short>
-{
- static const int _S_elem = 8;
-};
-
-template<>
-struct __vec_step_help<__vector __pixel>
-{
- static const int _S_elem = 8;
-};
-
-template<>
-struct __vec_step_help<__vector signed int>
-{
- static const int _S_elem = 4;
-};
-
-template<>
-struct __vec_step_help<__vector unsigned int>
-{
- static const int _S_elem = 4;
+template <> class __altivec_bool_ret <8> {
+ typedef __vector __bool short __ret;
};
-
-template<>
-struct __vec_step_help<__vector __bool int>
-{
- static const int _S_elem = 4;
-};
-
-template<>
-struct __vec_step_help<__vector unsigned char>
-{
- static const int _S_elem = 16;
+template <> class __altivec_bool_ret <16> {
+ typedef __vector __bool char __ret;
};
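+
+/* Thus, for illustration, __altivec_bool_ret<4>::__ret names
+   __vector __bool int: a predicate template can recover the bool
+   vector type that matches the element count of its operands.  */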
-template<>
-struct __vec_step_help<__vector signed char>
-{
- static const int _S_elem = 16;
-};
-
-template<>
-struct __vec_step_help<__vector __bool char>
-{
- static const int _S_elem = 16;
-};
-
-template<>
-struct __vec_step_help<__vector float>
-{
- static const int _S_elem = 4;
-};
-
-#define vec_step(t) __vec_step_help<typeof(t)>::_S_elem
-
-/* vec_abs */
-
-inline __vector signed char
-vec_abs (__vector signed char a1)
-{
- return __builtin_altivec_abs_v16qi (a1);
-}
-
-inline __vector signed short
-vec_abs (__vector signed short a1)
-{
- return __builtin_altivec_abs_v8hi (a1);
-}
-
-inline __vector signed int
-vec_abs (__vector signed int a1)
-{
- return __builtin_altivec_abs_v4si (a1);
-}
-
-inline __vector float
-vec_abs (__vector float a1)
-{
- return __builtin_altivec_abs_v4sf (a1);
-}
-
-/* vec_abss */
-
-inline __vector signed char
-vec_abss (__vector signed char a1)
-{
- return __builtin_altivec_abss_v16qi (a1);
-}
-
-inline __vector signed short
-vec_abss (__vector signed short a1)
-{
- return __builtin_altivec_abss_v8hi (a1);
-}
-
-inline __vector signed int
-vec_abss (__vector signed int a1)
-{
- return __builtin_altivec_abss_v4si (a1);
-}
-
-/* vec_add */
-
-inline __vector signed char
-vec_add (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_add (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_add (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_add (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_add (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_add (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_add (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_add (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_add (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_add (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_add (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_add (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_add (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_add (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_add (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_add (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_add (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_add (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_add (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vaddfp */
-
-inline __vector float
-vec_vaddfp (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vadduwm */
-
-inline __vector signed int
-vec_vadduwm (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vadduwm (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vadduwm (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vadduwm (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vadduwm (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vadduwm (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vadduhm */
-
-inline __vector signed short
-vec_vadduhm (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vadduhm (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vadduhm (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vadduhm (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vadduhm (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vadduhm (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vaddubm */
-
-inline __vector signed char
-vec_vaddubm (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vaddubm (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vaddubm (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vaddubm (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vaddubm (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vaddubm (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_addc */
-
-inline __vector unsigned int
-vec_addc (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vaddcuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_adds */
-
-inline __vector unsigned char
-vec_adds (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_adds (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_adds (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_adds (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_adds (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_adds (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned short
-vec_adds (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_adds (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_adds (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_adds (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_adds (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_adds (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned int
-vec_adds (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_adds (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_adds (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_adds (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_adds (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_adds (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vaddsws */
-
-inline __vector signed int
-vec_vaddsws (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vaddsws (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vaddsws (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vadduws */
-
-inline __vector unsigned int
-vec_vadduws (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vadduws (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vadduws (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vaddshs */
-
-inline __vector signed short
-vec_vaddshs (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vaddshs (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vaddshs (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vadduhs */
-
-inline __vector unsigned short
-vec_vadduhs (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vadduhs (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vadduhs (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vaddsbs */
-
-inline __vector signed char
-vec_vaddsbs (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vaddsbs (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vaddsbs (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vaddubs */
-
-inline __vector unsigned char
-vec_vaddubs (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vaddubs (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vaddubs (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_and */
-
-inline __vector float
-vec_and (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_and (__vector float a1, __vector __bool int a2)
-{
- return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_and (__vector __bool int a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_and (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_and (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_and (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_and (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_and (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_and (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_and (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_and (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_and (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_and (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_and (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_and (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_and (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_and (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_and (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_and (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_and (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_and (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_and (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_and (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_and (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_andc */
-
-inline __vector float
-vec_andc (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_andc (__vector float a1, __vector __bool int a2)
-{
- return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_andc (__vector __bool int a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_andc (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_andc (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_andc (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_andc (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_andc (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_andc (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_andc (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_andc (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_andc (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_andc (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_andc (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_andc (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_andc (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_andc (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_andc (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_andc (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_andc (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_andc (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_andc (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_andc (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_andc (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_avg */
-
-inline __vector unsigned char
-vec_avg (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_avg (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned short
-vec_avg (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_avg (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned int
-vec_avg (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_avg (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vavgsw */
-
-inline __vector signed int
-vec_vavgsw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vavguw */
-
-inline __vector unsigned int
-vec_vavguw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vavgsh */
-
-inline __vector signed short
-vec_vavgsh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vavguh */
-
-inline __vector unsigned short
-vec_vavguh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vavgsb */
-
-inline __vector signed char
-vec_vavgsb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vavgub */
-
-inline __vector unsigned char
-vec_vavgub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_ceil */
-
-inline __vector float
-vec_ceil (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrfip ((__vector float) a1);
-}
-
-/* vec_cmpb */
-
-inline __vector signed int
-vec_cmpb (__vector float a1, __vector float a2)
-{
- return (__vector signed int) __builtin_altivec_vcmpbfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_cmpeq */
-
-inline __vector __bool char
-vec_cmpeq (__vector signed char a1, __vector signed char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool char
-vec_cmpeq (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool short
-vec_cmpeq (__vector signed short a1, __vector signed short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool short
-vec_cmpeq (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool int
-vec_cmpeq (__vector signed int a1, __vector signed int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_cmpeq (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_cmpeq (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vcmpeqfp */
-
-inline __vector __bool int
-vec_vcmpeqfp (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vcmpequw */
-
-inline __vector __bool int
-vec_vcmpequw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_vcmpequw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vcmpequh */
-
-inline __vector __bool short
-vec_vcmpequh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool short
-vec_vcmpequh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vcmpequb */
-
-inline __vector __bool char
-vec_vcmpequb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool char
-vec_vcmpequb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_cmpge */
-
-inline __vector __bool int
-vec_cmpge (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_cmpgt */
-
-inline __vector __bool char
-vec_cmpgt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool char
-vec_cmpgt (__vector signed char a1, __vector signed char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool short
-vec_cmpgt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool short
-vec_cmpgt (__vector signed short a1, __vector signed short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool int
-vec_cmpgt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_cmpgt (__vector signed int a1, __vector signed int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_cmpgt (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vcmpgtfp */
-
-inline __vector __bool int
-vec_vcmpgtfp (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vcmpgtsw */
-
-inline __vector __bool int
-vec_vcmpgtsw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vcmpgtuw */
-
-inline __vector __bool int
-vec_vcmpgtuw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vcmpgtsh */
-
-inline __vector __bool short
-vec_vcmpgtsh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vcmpgtuh */
-
-inline __vector __bool short
-vec_vcmpgtuh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vcmpgtsb */
-
-inline __vector __bool char
-vec_vcmpgtsb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vcmpgtub */
-
-inline __vector __bool char
-vec_vcmpgtub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_cmple */
-
-inline __vector __bool int
-vec_cmple (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a2, (__vector float) a1);
-}
-
-/* vec_cmplt */
-
-inline __vector __bool char
-vec_cmplt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline __vector __bool char
-vec_cmplt (__vector signed char a1, __vector signed char a2)
-{
- return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline __vector __bool short
-vec_cmplt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline __vector __bool short
-vec_cmplt (__vector signed short a1, __vector signed short a2)
-{
- return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline __vector __bool int
-vec_cmplt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline __vector __bool int
-vec_cmplt (__vector signed int a1, __vector signed int a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline __vector __bool int
-vec_cmplt (__vector float a1, __vector float a2)
-{
- return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a2, (__vector float) a1);
-}
-
-/* vec_ctf */
-
-inline __vector float
-vec_ctf (__vector unsigned int a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
-}
-
-inline __vector float
-vec_ctf (__vector signed int a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
-}
-
-/* vec_vcfsx */
-
-inline __vector float
-vec_vcfsx (__vector signed int a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
-}
-
-/* vec_vcfux */
-
-inline __vector float
-vec_vcfux (__vector unsigned int a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
-}
-
-/* vec_cts */
-
-inline __vector signed int
-vec_cts (__vector float a1, const int a2)
-{
- return (__vector signed int) __builtin_altivec_vctsxs ((__vector float) a1, a2);
-}
-
-/* vec_ctu */
-
-inline __vector unsigned int
-vec_ctu (__vector float a1, const int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vctuxs ((__vector float) a1, a2);
-}
-
-/* vec_dss */
-
-inline void
-vec_dss (const int a1)
-{
- __builtin_altivec_dss (a1);
-}
-
-/* vec_dssall */
-
-inline void
-vec_dssall (void)
-{
- __builtin_altivec_dssall ();
-}
-
-/* vec_dst */
-
-inline void
-vec_dst (const __vector unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector __bool char *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector signed short *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector __bool short *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector __pixel *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector signed int *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector __bool int *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const __vector float *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const short *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const int *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const unsigned long *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const long *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dst (const float *a1, int a2, const int a3)
-{
- __builtin_altivec_dst ((void *) a1, a2, a3);
-}
-
-/* vec_dstst */
-
-inline void
-vec_dstst (const __vector unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector __bool char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector signed short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector __bool short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector __pixel *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector signed int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector __bool int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const __vector float *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const unsigned long *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const long *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstst (const float *a1, int a2, const int a3)
-{
- __builtin_altivec_dstst ((void *) a1, a2, a3);
-}
-
-/* vec_dststt */
-
-inline void
-vec_dststt (const __vector unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector __bool char *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector signed short *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector __bool short *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector __pixel *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector signed int *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector __bool int *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const __vector float *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const short *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const int *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const unsigned long *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const long *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dststt (const float *a1, int a2, const int a3)
-{
- __builtin_altivec_dststt ((void *) a1, a2, a3);
-}
-
-/* vec_dstt */
-
-inline void
-vec_dstt (const __vector unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector __bool char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector signed short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector __bool short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector __pixel *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector signed int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector __bool int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const __vector float *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const unsigned char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const signed char *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const unsigned short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const short *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const unsigned int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const int *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const unsigned long *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const long *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
-inline void
-vec_dstt (const float *a1, int a2, const int a3)
-{
- __builtin_altivec_dstt ((void *) a1, a2, a3);
-}
-
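
The four prefetch variants above differ only in intent, not in shape: vec_dst hints a load stream, vec_dstst a store stream, and the -t forms mark the stream transient. The second argument packs block size, block count, and stride; the third selects one of four stream tags and must be a literal. A minimal sketch, assuming #include <altivec.h>, -maltivec, and the (size, count, stride) control-word layout from the AltiVec PIM; DST_CTRL is a hypothetical helper, not part of this header:

#include <altivec.h>

/* Hypothetical helper: block size in 16-byte units (1-32), block count
   (1-256), signed byte stride, packed per the AltiVec PIM layout.  */
#define DST_CTRL(size, count, stride) \
  ((((size) & 0x1f) << 24) | (((count) & 0xff) << 16) | ((stride) & 0xffff))

void
prefetch_rows (const float *src, int rows, int row_bytes)
{
  /* Touch one 32-byte block per row (rows <= 256), one row apart,
     on stream tag 0; vec_dss (0) would cancel the stream.  */
  vec_dst (src, DST_CTRL (2, rows, row_bytes), 0);
}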
-/* vec_expte */
-
-inline __vector float
-vec_expte (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vexptefp ((__vector float) a1);
-}
-
-/* vec_floor */
-
-inline __vector float
-vec_floor (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrfim ((__vector float) a1);
-}
-
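
Note the asymmetry in guarantees here: vrfim (vec_floor) rounds exactly toward minus infinity, while vexptefp (vec_expte) only returns an estimate of 2**x (roughly 12 good bits), so it usually needs refinement before serious use. A small sketch, same assumptions as above:

__vector float
approx_exp2_of_floor (__vector float x)
{
  __vector float ip = vec_floor (x);   /* exact per-element floor */
  return vec_expte (ip);               /* 2**ip, estimate only */
}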
-/* vec_ld */
-
-inline __vector float
-vec_ld (int a1, const __vector float *a2)
-{
- return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector float
-vec_ld (int a1, const float *a2)
-{
- return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector __bool int
-vec_ld (int a1, const __vector __bool int *a2)
-{
- return (__vector __bool int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ld (int a1, const __vector signed int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ld (int a1, const int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ld (int a1, const long *a2)
-{
- return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ld (int a1, const __vector unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ld (int a1, const unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ld (int a1, const unsigned long *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector __bool short
-vec_ld (int a1, const __vector __bool short *a2)
-{
- return (__vector __bool short) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector __pixel
-vec_ld (int a1, const __vector __pixel *a2)
-{
- return (__vector __pixel) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed short
-vec_ld (int a1, const __vector signed short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed short
-vec_ld (int a1, const short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_ld (int a1, const __vector unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_ld (int a1, const unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector __bool char
-vec_ld (int a1, const __vector __bool char *a2)
-{
- return (__vector __bool char) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed char
-vec_ld (int a1, const __vector signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector signed char
-vec_ld (int a1, const signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_ld (int a1, const __vector unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_ld (int a1, const unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
-}
-
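
Every overload above compiles to a single lvx, which clears the low four bits of the effective address a1 + a2: vec_ld never loads across a 16-byte boundary, it loads the enclosing aligned block. A minimal sketch, same assumptions, with an explicitly aligned buffer:

float buf[64] __attribute__ ((aligned (16)));

__vector float
second_vector (void)
{
  /* Byte offset 16 from buf: elements 4..7.  The sum buf + 16 is
     truncated to a 16-byte boundary before the load.  */
  return vec_ld (16, buf);
}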
-/* vec_lde */
-
-inline __vector signed char
-vec_lde (int a1, const signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lde (int a1, const unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
-}
-
-inline __vector signed short
-vec_lde (int a1, const short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_lde (int a1, const unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
-}
-
-inline __vector float
-vec_lde (int a1, const float *a2)
-{
- return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_lde (int a1, const int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_lde (int a1, const unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_lde (int a1, const long *a2)
-{
- return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_lde (int a1, const unsigned long *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-/* vec_lvewx */
-
-inline __vector float
-vec_lvewx (int a1, float *a2)
-{
- return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_lvewx (int a1, int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_lvewx (int a1, unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_lvewx (int a1, long *a2)
-{
- return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_lvewx (int a1, unsigned long *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
-}
-
-/* vec_lvehx */
-
-inline __vector signed short
-vec_lvehx (int a1, short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_lvehx (int a1, unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
-}
-
-/* vec_lvebx */
-
-inline __vector signed char
-vec_lvebx (int a1, signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvebx (int a1, unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
-}
-
-/* vec_ldl */
-
-inline __vector float
-vec_ldl (int a1, const __vector float *a2)
-{
- return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector float
-vec_ldl (int a1, const float *a2)
-{
- return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector __bool int
-vec_ldl (int a1, const __vector __bool int *a2)
-{
- return (__vector __bool int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ldl (int a1, const __vector signed int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ldl (int a1, const int *a2)
-{
- return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed int
-vec_ldl (int a1, const long *a2)
-{
- return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ldl (int a1, const __vector unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ldl (int a1, const unsigned int *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned int
-vec_ldl (int a1, const unsigned long *a2)
-{
- return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector __bool short
-vec_ldl (int a1, const __vector __bool short *a2)
-{
- return (__vector __bool short) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector __pixel
-vec_ldl (int a1, const __vector __pixel *a2)
-{
- return (__vector __pixel) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed short
-vec_ldl (int a1, const __vector signed short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed short
-vec_ldl (int a1, const short *a2)
-{
- return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_ldl (int a1, const __vector unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned short
-vec_ldl (int a1, const unsigned short *a2)
-{
- return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector __bool char
-vec_ldl (int a1, const __vector __bool char *a2)
-{
- return (__vector __bool char) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed char
-vec_ldl (int a1, const __vector signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector signed char
-vec_ldl (int a1, const signed char *a2)
-{
- return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_ldl (int a1, const __vector unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_ldl (int a1, const unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
-}
-
-/* vec_loge */
-
-inline __vector float
-vec_loge (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vlogefp ((__vector float) a1);
-}
-
-/* vec_lvsl */
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile signed char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile unsigned short *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile short *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile unsigned int *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile int *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile unsigned long *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile long *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsl (int a1, const volatile float *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
-}
-
-/* vec_lvsr */
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile unsigned char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile signed char *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile unsigned short *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile short *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile unsigned int *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile int *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile unsigned long *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile long *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
-inline __vector unsigned char
-vec_lvsr (int a1, const volatile float *a2)
-{
- return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
-}
-
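
vec_lvsl and vec_lvsr load no data at all; they build a permute mask from the low four bits of the address. Their standard use is the classic AltiVec misaligned-load sequence: two aligned loads bracketing the target plus a vec_perm (defined elsewhere in this header) steered by the lvsl mask. A sketch, same assumptions:

__vector unsigned char
load_unaligned (const unsigned char *p)
{
  __vector unsigned char mask = vec_lvsl (0, p);  /* shift amount from p & 15 */
  __vector unsigned char hi = vec_ld (0, p);      /* block containing p */
  __vector unsigned char lo = vec_ld (15, p);     /* next block, if straddling */
  return vec_perm (hi, lo, mask);                 /* the 16 bytes at p */
}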
-/* vec_madd */
-
-inline __vector float
-vec_madd (__vector float a1, __vector float a2, __vector float a3)
-{
- return (__vector float) __builtin_altivec_vmaddfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
-}
-
-/* vec_madds */
-
-inline __vector signed short
-vec_madds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
-{
- return (__vector signed short) __builtin_altivec_vmhaddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
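
vmaddfp is a fused multiply-add and the workhorse of AltiVec float code; there is no plain float multiply in this header, so even a bare product is conventionally written as vec_madd with a -0.0f addend. A sketch of the common y = a*x + b, same assumptions:

__vector float
axpb (__vector float a, __vector float x, __vector float b)
{
  return vec_madd (a, x, b);   /* per-element a * x + b, single instruction */
}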
-/* vec_max */
-
-inline __vector unsigned char
-vec_max (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_max (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_max (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_max (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_max (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_max (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned short
-vec_max (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_max (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_max (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_max (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_max (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_max (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned int
-vec_max (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_max (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_max (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_max (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_max (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_max (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_max (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vmaxfp */
-
-inline __vector float
-vec_vmaxfp (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vmaxsw */
-
-inline __vector signed int
-vec_vmaxsw (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vmaxsw (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vmaxsw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vmaxuw */
-
-inline __vector unsigned int
-vec_vmaxuw (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vmaxuw (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vmaxuw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vmaxsh */
-
-inline __vector signed short
-vec_vmaxsh (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vmaxsh (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vmaxsh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmaxuh */
-
-inline __vector unsigned short
-vec_vmaxuh (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vmaxuh (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vmaxuh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmaxsb */
-
-inline __vector signed char
-vec_vmaxsb (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vmaxsb (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vmaxsb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vmaxub */
-
-inline __vector unsigned char
-vec_vmaxub (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vmaxub (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vmaxub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_mergeh */
-
-inline __vector __bool char
-vec_mergeh (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_mergeh (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_mergeh (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool short
-vec_mergeh (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __pixel
-vec_mergeh (__vector __pixel a1, __vector __pixel a2)
-{
- return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_mergeh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_mergeh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector float
-vec_mergeh (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_mergeh (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_mergeh (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_mergeh (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
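
vec_mergeh interleaves the high halves of its operands element by element and vec_mergel the low halves; together they zip two vectors, e.g. separate real/imaginary planes into interleaved pairs. A sketch, same assumptions:

void
zip_floats (__vector float re, __vector float im,
            __vector float *lo, __vector float *hi)
{
  *lo = vec_mergeh (re, im);   /* re0 im0 re1 im1 */
  *hi = vec_mergel (re, im);   /* re2 im2 re3 im3 */
}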
-/* vec_vmrghw */
-
-inline __vector float
-vec_vmrghw (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_vmrghw (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vmrghw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vmrghw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vmrghh */
-
-inline __vector __bool short
-vec_vmrghh (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vmrghh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vmrghh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __pixel
-vec_vmrghh (__vector __pixel a1, __vector __pixel a2)
-{
- return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmrghb */
-
-inline __vector __bool char
-vec_vmrghb (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vmrghb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vmrghb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_mergel */
-
-inline __vector __bool char
-vec_mergel (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_mergel (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_mergel (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector __bool short
-vec_mergel (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __pixel
-vec_mergel (__vector __pixel a1, __vector __pixel a2)
-{
- return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_mergel (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_mergel (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector float
-vec_mergel (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_mergel (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_mergel (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_mergel (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vmrglw */
-
-inline __vector float
-vec_vmrglw (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vmrglw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vmrglw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_vmrglw (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vmrglh */
-
-inline __vector __bool short
-vec_vmrglh (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vmrglh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vmrglh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __pixel
-vec_vmrglh (__vector __pixel a1, __vector __pixel a2)
-{
- return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmrglb */
-
-inline __vector __bool char
-vec_vmrglb (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vmrglb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vmrglb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_mfvscr */
-
-inline __vector unsigned short
-vec_mfvscr (void)
-{
- return (__vector unsigned short) __builtin_altivec_mfvscr ();
-}
-
-/* vec_min */
-
-inline __vector unsigned char
-vec_min (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_min (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_min (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_min (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_min (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_min (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned short
-vec_min (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_min (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_min (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_min (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_min (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_min (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned int
-vec_min (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_min (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_min (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_min (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_min (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_min (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_min (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
-}
-
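
Together with vec_max above, these give the standard branch-free per-element clamp. A sketch, same assumptions:

__vector signed short
clamp (__vector signed short x, __vector signed short lo,
       __vector signed short hi)
{
  return vec_min (vec_max (x, lo), hi);   /* clamp each element to [lo, hi] */
}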
-/* vec_vminfp */
-
-inline __vector float
-vec_vminfp (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vminsw */
-
-inline __vector signed int
-vec_vminsw (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vminsw (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vminsw (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vminuw */
-
-inline __vector unsigned int
-vec_vminuw (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vminuw (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vminuw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vminsh */
-
-inline __vector signed short
-vec_vminsh (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vminsh (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vminsh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vminuh */
-
-inline __vector unsigned short
-vec_vminuh (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vminuh (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vminuh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vminsb */
-
-inline __vector signed char
-vec_vminsb (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vminsb (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vminsb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vminub */
-
-inline __vector unsigned char
-vec_vminub (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vminub (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vminub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_mladd */
-
-inline __vector signed short
-vec_mladd (__vector signed short a1, __vector signed short a2, __vector signed short a3)
-{
- return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
-inline __vector signed short
-vec_mladd (__vector signed short a1, __vector unsigned short a2, __vector unsigned short a3)
-{
- return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
-inline __vector signed short
-vec_mladd (__vector unsigned short a1, __vector signed short a2, __vector signed short a3)
-{
- return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
-inline __vector unsigned short
-vec_mladd (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
-{
- return (__vector unsigned short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
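
All four overloads funnel into vmladduhm because the low 16 bits of a 16x16 product do not depend on signedness. A typical use is fixed-point scaling; a sketch, same assumptions:

__vector unsigned short
scale_mod64k (__vector unsigned short x, __vector unsigned short k)
{
  /* (x * k + 0) mod 2**16 per element; a caller doing Q8 arithmetic
     would shift the result right by 8 afterwards.  */
  return vec_mladd (x, k, vec_splat_u16 (0));
}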
-/* vec_mradds */
-
-inline __vector signed short
-vec_mradds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
-{
- return (__vector signed short) __builtin_altivec_vmhraddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
-}
-
-/* vec_msum */
-
-inline __vector unsigned int
-vec_msum (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
-}
-
-inline __vector signed int
-vec_msum (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned int
-vec_msum (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-inline __vector signed int
-vec_msum (__vector signed short a1, __vector signed short a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
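
vec_msum is the dot-product primitive: each 32-bit result element is the sum of the corresponding sub-element products plus the accumulator element, so an array dot product keeps one running vector and reduces once at the end. A sketch, same assumptions (vec_sums and vec_st are defined elsewhere in this header):

int
dot16 (const __vector signed short *a, const __vector signed short *b, int n)
{
  __vector signed int acc = vec_splat_s32 (0);
  for (int i = 0; i < n; i++)
    acc = vec_msum (a[i], b[i], acc);   /* folds 8 products per step */

  int out[4] __attribute__ ((aligned (16)));
  vec_st (vec_sums (acc, vec_splat_s32 (0)), 0, out);
  return out[3];                        /* vsumsws leaves the total in element 3 */
}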
-/* vec_vmsumshm */
-
-inline __vector signed int
-vec_vmsumshm (__vector signed short a1, __vector signed short a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-/* vec_vmsumuhm */
-
-inline __vector unsigned int
-vec_vmsumuhm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-/* vec_vmsummbm */
-
-inline __vector signed int
-vec_vmsummbm (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
-}
-
-/* vec_vmsumubm */
-
-inline __vector unsigned int
-vec_vmsumubm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
-}
-
-/* vec_msums */
-
-inline __vector unsigned int
-vec_msums (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-inline __vector signed int
-vec_msums (__vector signed short a1, __vector signed short a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-/* vec_vmsumshs */
-
-inline __vector signed int
-vec_vmsumshs (__vector signed short a1, __vector signed short a2, __vector signed int a3)
-{
- return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-/* vec_vmsumuhs */
-
-inline __vector unsigned int
-vec_vmsumuhs (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
-}
-
-/* vec_mtvscr */
-
-inline void
-vec_mtvscr (__vector signed int a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector unsigned int a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector __bool int a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector signed short a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector unsigned short a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector __bool short a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector __pixel a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector signed char a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector unsigned char a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
-inline void
-vec_mtvscr (__vector __bool char a1)
-{
- __builtin_altivec_mtvscr ((__vector signed int) a1);
-}
-
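
mfvscr/mtvscr move the 32-bit VSCR through the low word of a vector; the two architected bits are NJ (non-Java, flush-to-zero mode) and the sticky SAT flag. A cautious sketch, same assumptions; the 0x00010000 NJ mask follows the AltiVec PEM bit layout and should be checked against the target documentation:

void
enable_non_java_mode (void)
{
  /* VSCR occupies the last 32-bit element; set NJ, leave SAT clear.  */
  __vector unsigned int v = { 0, 0, 0, 0x00010000 };
  vec_mtvscr (v);
}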
-/* vec_mule */
-
-inline __vector unsigned short
-vec_mule (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_mule (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned int
-vec_mule (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_mule (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmulesh */
-
-inline __vector signed int
-vec_vmulesh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmuleuh */
-
-inline __vector unsigned int
-vec_vmuleuh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmulesb */
-
-inline __vector signed short
-vec_vmulesb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vmuleub */
-
-inline __vector unsigned short
-vec_vmuleub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_mulo */
-
-inline __vector unsigned short
-vec_mulo (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_mulo (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned int
-vec_mulo (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_mulo (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
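
Integer vector multiplies widen: vec_mule multiplies the even-numbered sub-elements, vec_mulo the odd ones, each yielding double-width results. Re-interleaving with vec_mergeh/vec_mergel restores source order, giving all sixteen full products of two byte vectors. A sketch, same assumptions:

void
full_products (__vector unsigned char a, __vector unsigned char b,
               __vector unsigned short *lo, __vector unsigned short *hi)
{
  __vector unsigned short even = vec_mule (a, b);  /* products 0, 2, ..., 14 */
  __vector unsigned short odd  = vec_mulo (a, b);  /* products 1, 3, ..., 15 */
  *lo = vec_mergeh (even, odd);                    /* products 0..7, in order */
  *hi = vec_mergel (even, odd);                    /* products 8..15, in order */
}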
-/* vec_vmulosh */
-
-inline __vector signed int
-vec_vmulosh (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmulouh */
-
-inline __vector unsigned int
-vec_vmulouh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vmulosb */
-
-inline __vector signed short
-vec_vmulosb (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vmuloub */
-
-inline __vector unsigned short
-vec_vmuloub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_nmsub */
-
-inline __vector float
-vec_nmsub (__vector float a1, __vector float a2, __vector float a3)
-{
- return (__vector float) __builtin_altivec_vnmsubfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
-}
-
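
vnmsubfp computes -(a1*a2 - a3), i.e. a3 - a1*a2 with a fused product, which is precisely the residual step of a Newton-Raphson iteration. One refinement pass over the hardware reciprocal estimate (vec_re, defined elsewhere in this header) roughly doubles its good bits. A sketch, same assumptions:

__vector float
reciprocal (__vector float x)
{
  __vector float one = vec_ctf (vec_splat_u32 (1), 0);   /* {1, 1, 1, 1} */
  __vector float y = vec_re (x);                         /* ~12-bit estimate */
  /* y' = y + y * (1 - x*y).  */
  return vec_madd (y, vec_nmsub (x, y, one), y);
}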
-/* vec_nor */
-
-inline __vector float
-vec_nor (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_nor (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_nor (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_nor (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_nor (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_nor (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_nor (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_nor (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_nor (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_nor (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
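-/* Editor's sketch, not from the original header: AltiVec has no
-   one-operand complement, so the customary idiom for bitwise NOT is
-   vec_nor of a value with itself.  */
-static inline __vector unsigned int
-vnot (__vector unsigned int a)
-{
- return vec_nor (a, a); /* ~(a | a) == ~a */
-}
-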
-/* vec_or */
-
-inline __vector float
-vec_or (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_or (__vector float a1, __vector __bool int a2)
-{
- return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_or (__vector __bool int a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_or (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_or (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_or (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_or (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_or (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_or (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_or (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_or (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_or (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_or (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_or (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_or (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_or (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_or (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_or (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_or (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_or (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_or (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_or (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_or (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_or (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_pack */
-
-inline __vector signed char
-vec_pack (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned char
-vec_pack (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector __bool char
-vec_pack (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_pack (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_pack (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_pack (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkuwum */
-
-inline __vector __bool short
-vec_vpkuwum (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_vpkuwum (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_vpkuwum (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkuhum */
-
-inline __vector __bool char
-vec_vpkuhum (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed char
-vec_vpkuhum (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned char
-vec_vpkuhum (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_packpx */
-
-inline __vector __pixel
-vec_packpx (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector __pixel) __builtin_altivec_vpkpx ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_packs */
-
-inline __vector unsigned char
-vec_packs (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed char
-vec_packs (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_packs (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_packs (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkswss */
-
-inline __vector signed short
-vec_vpkswss (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkuwus */
-
-inline __vector unsigned short
-vec_vpkuwus (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkshss */
-
-inline __vector signed char
-vec_vpkshss (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vpkuhus */
-
-inline __vector unsigned char
-vec_vpkuhus (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_packsu */
-
-inline __vector unsigned char
-vec_packsu (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned char
-vec_packsu (__vector signed short a1, __vector signed short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_packsu (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_packsu (__vector signed int a1, __vector signed int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkswus */
-
-inline __vector unsigned short
-vec_vpkswus (__vector signed int a1, __vector signed int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vpkshus */
-
-inline __vector unsigned char
-vec_vpkshus (__vector signed short a1, __vector signed short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
-}
-
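-/* Editor's sketch, not part of the original header: vec_pack keeps
-   each element modulo the narrower width, vec_packs saturates to the
-   signedness of its inputs, and vec_packsu always saturates to the
-   unsigned range -- the usual choice when narrowing signed
-   intermediates down to pixel bytes.  */
-static inline __vector unsigned char
-narrow_to_bytes (__vector signed short lo, __vector signed short hi)
-{
- return vec_packsu (lo, hi); /* clamp each short to 0..255 */
-}
-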
-/* vec_perm */
-
-inline __vector float
-vec_perm (__vector float a1, __vector float a2, __vector unsigned char a3)
-{
- return (__vector float) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector signed int
-vec_perm (__vector signed int a1, __vector signed int a2, __vector unsigned char a3)
-{
- return (__vector signed int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector unsigned int
-vec_perm (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned char a3)
-{
- return (__vector unsigned int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector __bool int
-vec_perm (__vector __bool int a1, __vector __bool int a2, __vector unsigned char a3)
-{
- return (__vector __bool int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector signed short
-vec_perm (__vector signed short a1, __vector signed short a2, __vector unsigned char a3)
-{
- return (__vector signed short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector unsigned short
-vec_perm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned char a3)
-{
- return (__vector unsigned short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector __bool short
-vec_perm (__vector __bool short a1, __vector __bool short a2, __vector unsigned char a3)
-{
- return (__vector __bool short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector __pixel
-vec_perm (__vector __pixel a1, __vector __pixel a2, __vector unsigned char a3)
-{
- return (__vector __pixel) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector signed char
-vec_perm (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
-{
- return (__vector signed char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector unsigned char
-vec_perm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
-{
- return (__vector unsigned char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
-inline __vector __bool char
-vec_perm (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
-{
- return (__vector __bool char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
-}
-
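-/* Editor's sketch, not from the original header: vec_perm picks
-   bytes out of the 32-byte concatenation of its first two operands,
-   indexed by the low five bits of each control byte; reversing a
-   vector is the classic one-liner.  */
-static inline __vector unsigned char
-reverse_bytes (__vector unsigned char a)
-{
- const __vector unsigned char rev =
-   { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
- return vec_perm (a, a, rev);
-}
-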
-/* vec_re */
-
-inline __vector float
-vec_re (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrefp ((__vector float) a1);
-}
-
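-/* Editor's sketch, not part of the original header: vrefp is only a
-   ~12-bit estimate; one Newton-Raphson step built from the fused
-   multiply-adds brings it to nearly full single precision.  */
-static inline __vector float
-recip_nr (__vector float x)
-{
- const __vector float one = { 1.0f, 1.0f, 1.0f, 1.0f };
- __vector float y = vec_re (x); /* estimate of 1/x */
- return vec_madd (y, vec_nmsub (x, y, one), y); /* y + y*(1 - x*y) */
-}
-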
-/* vec_rl */
-
-inline __vector signed char
-vec_rl (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_rl (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_rl (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_rl (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_rl (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_rl (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vrlw */
-
-inline __vector signed int
-vec_vrlw (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vrlw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vrlh */
-
-inline __vector signed short
-vec_vrlh (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vrlh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vrlb */
-
-inline __vector signed char
-vec_vrlb (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vrlb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
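-/* Editor's sketch, not from the original header: vec_rl rotates each
-   element left by the per-lane count modulo the element width; a
-   right rotation by n is just a left rotation by width - n.  */
-static inline __vector unsigned int
-rotl8 (__vector unsigned int a)
-{
- return vec_rl (a, vec_splat_u32 (8)); /* rotate each word left 8 bits */
-}
-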
-/* vec_round */
-
-inline __vector float
-vec_round (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrfin ((__vector float) a1);
-}
-
-/* vec_rsqrte */
-
-inline __vector float
-vec_rsqrte (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrsqrtefp ((__vector float) a1);
-}
-
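-/* Editor's sketch, not part of the original header: as with vec_re,
-   one Newton-Raphson step refines the vrsqrtefp estimate using
-   y' = y + (y/2) * (1 - x*y*y).  */
-static inline __vector float
-rsqrt_nr (__vector float x)
-{
- const __vector float zero = { 0.0f, 0.0f, 0.0f, 0.0f };
- const __vector float half = { 0.5f, 0.5f, 0.5f, 0.5f };
- const __vector float one  = { 1.0f, 1.0f, 1.0f, 1.0f };
- __vector float y  = vec_rsqrte (x);
- __vector float yy = vec_madd (y, y, zero);
- return vec_madd (vec_madd (y, half, zero), vec_nmsub (x, yy, one), y);
-}
-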
-/* vec_sel */
-
-inline __vector float
-vec_sel (__vector float a1, __vector float a2, __vector __bool int a3)
-{
- return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector float
-vec_sel (__vector float a1, __vector float a2, __vector unsigned int a3)
-{
- return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed int
-vec_sel (__vector signed int a1, __vector signed int a2, __vector __bool int a3)
-{
- return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed int
-vec_sel (__vector signed int a1, __vector signed int a2, __vector unsigned int a3)
-{
- return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned int
-vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector __bool int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned int
-vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool int
-vec_sel (__vector __bool int a1, __vector __bool int a2, __vector __bool int a3)
-{
- return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool int
-vec_sel (__vector __bool int a1, __vector __bool int a2, __vector unsigned int a3)
-{
- return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed short
-vec_sel (__vector signed short a1, __vector signed short a2, __vector __bool short a3)
-{
- return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed short
-vec_sel (__vector signed short a1, __vector signed short a2, __vector unsigned short a3)
-{
- return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned short
-vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector __bool short a3)
-{
- return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned short
-vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
-{
- return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool short
-vec_sel (__vector __bool short a1, __vector __bool short a2, __vector __bool short a3)
-{
- return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool short
-vec_sel (__vector __bool short a1, __vector __bool short a2, __vector unsigned short a3)
-{
- return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed char
-vec_sel (__vector signed char a1, __vector signed char a2, __vector __bool char a3)
-{
- return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector signed char
-vec_sel (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
-{
- return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned char
-vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector __bool char a3)
-{
- return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector unsigned char
-vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
-{
- return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool char
-vec_sel (__vector __bool char a1, __vector __bool char a2, __vector __bool char a3)
-{
- return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
-inline __vector __bool char
-vec_sel (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
-{
- return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
-}
-
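-/* Editor's sketch, not original header text: vec_sel takes, bit for
-   bit, its second operand where the mask is set and its first where
-   it is clear, so pairing it with a compare gives branch-free
-   selection (what vec_max does directly for this particular case).  */
-static inline __vector signed int
-select_max (__vector signed int a, __vector signed int b)
-{
- return vec_sel (a, b, vec_cmpgt (b, a)); /* b where b > a, else a */
-}
-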
-/* vec_sl */
-
-inline __vector signed char
-vec_sl (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sl (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_sl (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sl (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_sl (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sl (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vslw */
-
-inline __vector signed int
-vec_vslw (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vslw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vslh */
-
-inline __vector signed short
-vec_vslh (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vslh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vslb */
-
-inline __vector signed char
-vec_vslb (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vslb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_sld */
-
-inline __vector float
-vec_sld (__vector float a1, __vector float a2, const int a3)
-{
- return (__vector float) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector signed int
-vec_sld (__vector signed int a1, __vector signed int a2, const int a3)
-{
- return (__vector signed int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector unsigned int
-vec_sld (__vector unsigned int a1, __vector unsigned int a2, const int a3)
-{
- return (__vector unsigned int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector __bool int
-vec_sld (__vector __bool int a1, __vector __bool int a2, const int a3)
-{
- return (__vector __bool int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector signed short
-vec_sld (__vector signed short a1, __vector signed short a2, const int a3)
-{
- return (__vector signed short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector unsigned short
-vec_sld (__vector unsigned short a1, __vector unsigned short a2, const int a3)
-{
- return (__vector unsigned short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector __bool short
-vec_sld (__vector __bool short a1, __vector __bool short a2, const int a3)
-{
- return (__vector __bool short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector __pixel
-vec_sld (__vector __pixel a1, __vector __pixel a2, const int a3)
-{
- return (__vector __pixel) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector signed char
-vec_sld (__vector signed char a1, __vector signed char a2, const int a3)
-{
- return (__vector signed char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector unsigned char
-vec_sld (__vector unsigned char a1, __vector unsigned char a2, const int a3)
-{
- return (__vector unsigned char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
-inline __vector __bool char
-vec_sld (__vector __bool char a1, __vector __bool char a2, const int a3)
-{
- return (__vector __bool char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
-}
-
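-/* Editor's note, illustrative only: the last argument of vec_sld
-   must be a compile-time constant in 0..15; the result is the high
-   16 bytes of the concatenation a1:a2 shifted left that many bytes,
-   which is handy for realigning data that straddles two loads.  */
-static inline __vector unsigned char
-shift_in_4 (__vector unsigned char cur, __vector unsigned char next)
-{
- return vec_sld (cur, next, 4); /* drop 4 bytes of cur, pull in 4 of next */
-}
-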
-/* vec_sll */
-
-inline __vector signed int
-vec_sll (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sll (__vector signed int a1, __vector unsigned short a2)
-{
- return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sll (__vector signed int a1, __vector unsigned char a2)
-{
- return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sll (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sll (__vector unsigned int a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sll (__vector unsigned int a1, __vector unsigned char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_sll (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_sll (__vector __bool int a1, __vector unsigned short a2)
-{
- return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_sll (__vector __bool int a1, __vector unsigned char a2)
-{
- return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_sll (__vector signed short a1, __vector unsigned int a2)
-{
- return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_sll (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_sll (__vector signed short a1, __vector unsigned char a2)
-{
- return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_sll (__vector unsigned short a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_sll (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_sll (__vector unsigned short a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_sll (__vector __bool short a1, __vector unsigned int a2)
-{
- return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_sll (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_sll (__vector __bool short a1, __vector unsigned char a2)
-{
- return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_sll (__vector __pixel a1, __vector unsigned int a2)
-{
- return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_sll (__vector __pixel a1, __vector unsigned short a2)
-{
- return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_sll (__vector __pixel a1, __vector unsigned char a2)
-{
- return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_sll (__vector signed char a1, __vector unsigned int a2)
-{
- return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_sll (__vector signed char a1, __vector unsigned short a2)
-{
- return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_sll (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_sll (__vector unsigned char a1, __vector unsigned int a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_sll (__vector unsigned char a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_sll (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_sll (__vector __bool char a1, __vector unsigned int a2)
-{
- return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_sll (__vector __bool char a1, __vector unsigned short a2)
-{
- return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_sll (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
-}
-
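-/* Editor's sketch, not from the original header: vec_sll shifts the
-   whole 128-bit value left by 0..7 bits (the shift vector should be
-   a splat), while vec_slo below shifts by whole octets; composing
-   the two gives an arbitrary 0..127-bit shift from one splatted
-   count byte.  */
-static inline __vector unsigned char
-shift_left_bits (__vector unsigned char a, __vector unsigned char sh)
-{
- return vec_sll (vec_slo (a, sh), sh); /* octets first, then leftover bits */
-}
-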
-/* vec_slo */
-
-inline __vector float
-vec_slo (__vector float a1, __vector signed char a2)
-{
- return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_slo (__vector float a1, __vector unsigned char a2)
-{
- return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_slo (__vector signed int a1, __vector signed char a2)
-{
- return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_slo (__vector signed int a1, __vector unsigned char a2)
-{
- return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_slo (__vector unsigned int a1, __vector signed char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_slo (__vector unsigned int a1, __vector unsigned char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_slo (__vector signed short a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_slo (__vector signed short a1, __vector unsigned char a2)
-{
- return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_slo (__vector unsigned short a1, __vector signed char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_slo (__vector unsigned short a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_slo (__vector __pixel a1, __vector signed char a2)
-{
- return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_slo (__vector __pixel a1, __vector unsigned char a2)
-{
- return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_slo (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_slo (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_slo (__vector unsigned char a1, __vector signed char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_slo (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_splat */
-
-inline __vector signed char
-vec_splat (__vector signed char a1, const int a2)
-{
- return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
-inline __vector unsigned char
-vec_splat (__vector unsigned char a1, const int a2)
-{
- return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
-inline __vector __bool char
-vec_splat (__vector __bool char a1, const int a2)
-{
- return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
-inline __vector signed short
-vec_splat (__vector signed short a1, const int a2)
-{
- return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector unsigned short
-vec_splat (__vector unsigned short a1, const int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector __bool short
-vec_splat (__vector __bool short a1, const int a2)
-{
- return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector __pixel
-vec_splat (__vector __pixel a1, const int a2)
-{
- return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector float
-vec_splat (__vector float a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector signed int
-vec_splat (__vector signed int a1, const int a2)
-{
- return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector unsigned int
-vec_splat (__vector unsigned int a1, const int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector __bool int
-vec_splat (__vector __bool int a1, const int a2)
-{
- return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-/* vec_vspltw */
-
-inline __vector float
-vec_vspltw (__vector float a1, const int a2)
-{
- return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector signed int
-vec_vspltw (__vector signed int a1, const int a2)
-{
- return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector unsigned int
-vec_vspltw (__vector unsigned int a1, const int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-inline __vector __bool int
-vec_vspltw (__vector __bool int a1, const int a2)
-{
- return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
-}
-
-/* vec_vsplth */
-
-inline __vector __bool short
-vec_vsplth (__vector __bool short a1, const int a2)
-{
- return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector signed short
-vec_vsplth (__vector signed short a1, const int a2)
-{
- return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector unsigned short
-vec_vsplth (__vector unsigned short a1, const int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-inline __vector __pixel
-vec_vsplth (__vector __pixel a1, const int a2)
-{
- return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
-}
-
-/* vec_vspltb */
-
-inline __vector signed char
-vec_vspltb (__vector signed char a1, const int a2)
-{
- return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
-inline __vector unsigned char
-vec_vspltb (__vector unsigned char a1, const int a2)
-{
- return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
-inline __vector __bool char
-vec_vspltb (__vector __bool char a1, const int a2)
-{
- return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
-}
-
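-/* Editor's sketch, not part of the original header: the element
-   index of vec_splat must be a literal constant; the chosen element
-   is broadcast across all lanes, e.g. to scale a whole vector by one
-   coefficient.  */
-static inline __vector float
-broadcast_first (__vector float a)
-{
- return vec_splat (a, 0); /* { a[0], a[0], a[0], a[0] } */
-}
-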
-/* vec_splat_s8 */
-
-inline __vector signed char
-vec_splat_s8 (const int a1)
-{
- return (__vector signed char) __builtin_altivec_vspltisb (a1);
-}
-
-/* vec_splat_s16 */
-
-inline __vector signed short
-vec_splat_s16 (const int a1)
-{
- return (__vector signed short) __builtin_altivec_vspltish (a1);
-}
-
-/* vec_splat_s32 */
-
-inline __vector signed int
-vec_splat_s32 (const int a1)
-{
- return (__vector signed int) __builtin_altivec_vspltisw (a1);
-}
-
-/* vec_splat_u8 */
-
-inline __vector unsigned char
-vec_splat_u8 (const int a1)
-{
- return (__vector unsigned char) __builtin_altivec_vspltisb (a1);
-}
-
-/* vec_splat_u16 */
-
-inline __vector unsigned short
-vec_splat_u16 (const int a1)
-{
- return (__vector unsigned short) __builtin_altivec_vspltish (a1);
-}
-
-/* vec_splat_u32 */
-
-inline __vector unsigned int
-vec_splat_u32 (const int a1)
-{
- return (__vector unsigned int) __builtin_altivec_vspltisw (a1);
-}
-
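-/* Editor's note, illustrative only: the vspltis* immediate is a
-   signed 5-bit field, so these forms only reach constants in
-   -16..15; anything wider needs a load or a small arithmetic trick.  */
-static inline __vector unsigned int
-splat_16 (void)
-{
- /* 16 is out of immediate range: build it as 8 << 1.  */
- return vec_sl (vec_splat_u32 (8), vec_splat_u32 (1));
-}
-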
-/* vec_sr */
-
-inline __vector signed char
-vec_sr (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sr (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_sr (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sr (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_sr (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sr (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsrw */
-
-inline __vector signed int
-vec_vsrw (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsrw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsrh */
-
-inline __vector signed short
-vec_vsrh (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsrh (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vsrb */
-
-inline __vector signed char
-vec_vsrb (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsrb (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_sra */
-
-inline __vector signed char
-vec_sra (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sra (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_sra (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sra (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_sra (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sra (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsraw */
-
-inline __vector signed int
-vec_vsraw (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsraw (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsrah */
-
-inline __vector signed short
-vec_vsrah (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsrah (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vsrab */
-
-inline __vector signed char
-vec_vsrab (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsrab (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
-}
-
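-/* Editor's sketch, not original header text: vec_sra fills with the
-   sign bit where vec_sr above fills with zeros; per-lane shift
-   counts are taken modulo the element width.  */
-static inline __vector signed int
-asr2 (__vector signed int a)
-{
- return vec_sra (a, vec_splat_u32 (2)); /* a >> 2 per lane, sign-filling */
-}
-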
-/* vec_srl */
-
-inline __vector signed int
-vec_srl (__vector signed int a1, __vector unsigned int a2)
-{
- return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_srl (__vector signed int a1, __vector unsigned short a2)
-{
- return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_srl (__vector signed int a1, __vector unsigned char a2)
-{
- return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_srl (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_srl (__vector unsigned int a1, __vector unsigned short a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_srl (__vector unsigned int a1, __vector unsigned char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_srl (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_srl (__vector __bool int a1, __vector unsigned short a2)
-{
- return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_srl (__vector __bool int a1, __vector unsigned char a2)
-{
- return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_srl (__vector signed short a1, __vector unsigned int a2)
-{
- return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_srl (__vector signed short a1, __vector unsigned short a2)
-{
- return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_srl (__vector signed short a1, __vector unsigned char a2)
-{
- return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_srl (__vector unsigned short a1, __vector unsigned int a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_srl (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_srl (__vector unsigned short a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_srl (__vector __bool short a1, __vector unsigned int a2)
-{
- return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_srl (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_srl (__vector __bool short a1, __vector unsigned char a2)
-{
- return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_srl (__vector __pixel a1, __vector unsigned int a2)
-{
- return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_srl (__vector __pixel a1, __vector unsigned short a2)
-{
- return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_srl (__vector __pixel a1, __vector unsigned char a2)
-{
- return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_srl (__vector signed char a1, __vector unsigned int a2)
-{
- return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_srl (__vector signed char a1, __vector unsigned short a2)
-{
- return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_srl (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_srl (__vector unsigned char a1, __vector unsigned int a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_srl (__vector unsigned char a1, __vector unsigned short a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_srl (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_srl (__vector __bool char a1, __vector unsigned int a2)
-{
- return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_srl (__vector __bool char a1, __vector unsigned short a2)
-{
- return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_srl (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
-}
-
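-/* Editor's note, not part of the original header: vec_srl and the
-   vec_sro group below mirror vec_sll/vec_slo for right shifts --
-   bits from the splatted low-order field, octets from the count's
-   higher bits -- and compose the same way for a full 0..127-bit
-   right shift.  */
-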
-/* vec_sro */
-
-inline __vector float
-vec_sro (__vector float a1, __vector signed char a2)
-{
- return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_sro (__vector float a1, __vector unsigned char a2)
-{
- return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sro (__vector signed int a1, __vector signed char a2)
-{
- return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sro (__vector signed int a1, __vector unsigned char a2)
-{
- return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sro (__vector unsigned int a1, __vector signed char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sro (__vector unsigned int a1, __vector unsigned char a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_sro (__vector signed short a1, __vector signed char a2)
-{
- return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_sro (__vector signed short a1, __vector unsigned char a2)
-{
- return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_sro (__vector unsigned short a1, __vector signed char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_sro (__vector unsigned short a1, __vector unsigned char a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_sro (__vector __pixel a1, __vector signed char a2)
-{
- return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __pixel
-vec_sro (__vector __pixel a1, __vector unsigned char a2)
-{
- return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_sro (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_sro (__vector signed char a1, __vector unsigned char a2)
-{
- return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_sro (__vector unsigned char a1, __vector signed char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_sro (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
-}
-
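-/* Illustrative sketch, not part of the original header: vec_sro maps
-   to vsro, which shifts the full 128-bit contents of a1 right by a
-   whole number of octets, the count being taken from the low-order
-   byte of a2 (bits 121:124 of the register).  Assuming v and shift
-   were set up earlier:
-
-     __vector unsigned char v, shift, r;
-     r = vec_sro (v, shift);
-  */
-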
-/* vec_st */
-
-inline void
-vec_st (__vector float a1, int a2, __vector float *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector float a1, int a2, float *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed int a1, int a2, __vector signed int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed int a1, int a2, int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned int a1, int a2, __vector unsigned int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool int a1, int a2, __vector __bool int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool int a1, int a2, int *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed short a1, int a2, __vector signed short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed short a1, int a2, short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned short a1, int a2, __vector unsigned short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool short a1, int a2, __vector __bool short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __pixel a1, int a2, __vector __pixel *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __pixel a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __pixel a1, int a2, short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool short a1, int a2, short *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed char a1, int a2, __vector signed char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector signed char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned char a1, int a2, __vector unsigned char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector unsigned char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool char a1, int a2, __vector __bool char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_st (__vector __bool char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
-}
-
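-/* Illustrative sketch, not part of the original header: vec_st maps
-   to stvx, storing all 16 bytes of a1 at the effective address
-   a2 + (char *) a3 truncated down to a multiple of 16, so the target
-   should be 16-byte aligned.  Assuming a suitably aligned buffer:
-
-     float buf[4] __attribute__ ((aligned (16)));
-     __vector float v = vec_ld (0, buf);
-     vec_st (v, 0, buf);
-  */
-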
-/* vec_ste */
-
-inline void
-vec_ste (__vector signed char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector unsigned char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector signed short a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector unsigned short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool short a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __pixel a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __pixel a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector float a1, int a2, float *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector signed int a1, int a2, int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector unsigned int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool int a1, int a2, int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_ste (__vector __bool int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
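-/* Illustrative sketch, not part of the original header: vec_ste
-   stores a single element via stvebx/stvehx/stvewx; the element
-   written is the one whose byte offset within the vector matches the
-   low four bits of the effective address.  The names below are
-   assumptions:
-
-     float f;
-     __vector float v;
-     vec_ste (v, 0, &f);
-  */
-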
-/* vec_stvewx */
-
-inline void
-vec_stvewx (__vector float a1, int a2, float *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvewx (__vector signed int a1, int a2, int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvewx (__vector unsigned int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvewx (__vector __bool int a1, int a2, int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvewx (__vector __bool int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
-}
-
-/* vec_stvehx */
-
-inline void
-vec_stvehx (__vector signed short a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvehx (__vector unsigned short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvehx (__vector __bool short a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvehx (__vector __bool short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvehx (__vector __pixel a1, int a2, short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvehx (__vector __pixel a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
-}
-
-/* vec_stvebx */
-
-inline void
-vec_stvebx (__vector signed char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvebx (__vector unsigned char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvebx (__vector __bool char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stvebx (__vector __bool char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
-}
-
-/* vec_stl */
-
-inline void
-vec_stl (__vector float a1, int a2, __vector float *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector float a1, int a2, float *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed int a1, int a2, __vector signed int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed int a1, int a2, int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned int a1, int a2, __vector unsigned int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool int a1, int a2, __vector __bool int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool int a1, int a2, unsigned int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool int a1, int a2, int *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed short a1, int a2, __vector signed short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed short a1, int a2, short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned short a1, int a2, __vector unsigned short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool short a1, int a2, __vector __bool short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool short a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool short a1, int a2, short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __pixel a1, int a2, __vector __pixel *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __pixel a1, int a2, unsigned short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __pixel a1, int a2, short *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed char a1, int a2, __vector signed char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector signed char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned char a1, int a2, __vector unsigned char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector unsigned char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool char a1, int a2, __vector __bool char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool char a1, int a2, unsigned char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
-inline void
-vec_stl (__vector __bool char a1, int a2, signed char *a3)
-{
- __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
-}
-
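-/* Illustrative note, not part of the original header: vec_stl is the
-   stvxl form of vec_st.  It stores the same 16 bytes but marks the
-   touched cache block least-recently-used, a hint that the data will
-   not be reused soon:
-
-     float buf[4] __attribute__ ((aligned (16)));
-     __vector float v = vec_ld (0, buf);
-     vec_stl (v, 0, buf);
-  */
-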
-/* vec_sub */
-
-inline __vector signed char
-vec_sub (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_sub (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_sub (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sub (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sub (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_sub (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed short
-vec_sub (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_sub (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_sub (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sub (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sub (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_sub (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed int
-vec_sub (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sub (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sub (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sub (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sub (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_sub (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_sub (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
-}
-
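-/* Illustrative sketch, not part of the original header: vec_sub is
-   element-wise modular subtraction (vsububm/vsubuhm/vsubuwm), or
-   vsubfp for float; for unsigned chars each result byte wraps mod
-   256.  The saturating counterpart is vec_subs, defined further
-   below:
-
-     __vector unsigned char x, y, d;
-     d = vec_sub (x, y);
-  */
-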
-/* vec_vsubfp */
-
-inline __vector float
-vec_vsubfp (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
-}
-
-/* vec_vsubuwm */
-
-inline __vector signed int
-vec_vsubuwm (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vsubuwm (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vsubuwm (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsubuwm (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsubuwm (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsubuwm (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsubuhm */
-
-inline __vector signed short
-vec_vsubuhm (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vsubuhm (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vsubuhm (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsubuhm (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsubuhm (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsubuhm (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vsububm */
-
-inline __vector signed char
-vec_vsububm (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vsububm (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vsububm (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsububm (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsububm (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsububm (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_subc */
-
-inline __vector unsigned int
-vec_subc (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubcuw ((__vector signed int) a1, (__vector signed int) a2);
-}
-
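-/* Illustrative sketch, not part of the original header: vec_subc
-   (vsubcuw) produces, per 32-bit word, the carry out of the unsigned
-   subtraction a1 - a2: 1 where a1 >= a2 (no borrow), 0 otherwise,
-   which makes it a building block for multiword arithmetic:
-
-     __vector unsigned int a, b, carry;
-     carry = vec_subc (a, b);
-  */
-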
-/* vec_subs */
-
-inline __vector unsigned char
-vec_subs (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_subs (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_subs (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_subs (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_subs (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_subs (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned short
-vec_subs (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_subs (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_subs (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_subs (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_subs (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_subs (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned int
-vec_subs (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_subs (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_subs (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_subs (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_subs (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_subs (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
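-/* Illustrative sketch, not part of the original header: vec_subs
-   saturates instead of wrapping; for unsigned chars any difference
-   that would drop below zero clamps to 0, and the signed forms clamp
-   at the type's extremes:
-
-     __vector unsigned char x, y, d;
-     d = vec_subs (x, y);
-  */
-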
-/* vec_vsubsws */
-
-inline __vector signed int
-vec_vsubsws (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vsubsws (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_vsubsws (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsubuws */
-
-inline __vector unsigned int
-vec_vsubuws (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsubuws (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_vsubuws (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_vsubshs */
-
-inline __vector signed short
-vec_vsubshs (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vsubshs (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector signed short
-vec_vsubshs (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vsubuhs */
-
-inline __vector unsigned short
-vec_vsubuhs (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsubuhs (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline __vector unsigned short
-vec_vsubuhs (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
-}
-
-/* vec_vsubsbs */
-
-inline __vector signed char
-vec_vsubsbs (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vsubsbs (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector signed char
-vec_vsubsbs (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_vsububs */
-
-inline __vector unsigned char
-vec_vsububs (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsububs (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline __vector unsigned char
-vec_vsububs (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
-}
-
-/* vec_sum4s */
-
-inline __vector unsigned int
-vec_sum4s (__vector unsigned char a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sum4s (__vector signed char a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_sum4s (__vector signed short a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) a1, (__vector signed int) a2);
-}
-
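-/* Illustrative sketch, not part of the original header: vec_sum4s
-   sums the elements packed into each word of a1 (four bytes, or two
-   halfwords for the vsum4shs form) and adds the corresponding word
-   of a2, saturating each 32-bit result:
-
-     __vector signed char bytes;
-     __vector signed int acc, sums;
-     sums = vec_sum4s (bytes, acc);
-  */
-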
-/* vec_vsum4shs */
-
-inline __vector signed int
-vec_vsum4shs (__vector signed short a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) a1, (__vector signed int) a2);
-}
-
-/* vec_vsum4sbs */
-
-inline __vector signed int
-vec_vsum4sbs (__vector signed char a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) a1, (__vector signed int) a2);
-}
-
-/* vec_vsum4ubs */
-
-inline __vector unsigned int
-vec_vsum4ubs (__vector unsigned char a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) a1, (__vector signed int) a2);
-}
-
-/* vec_sum2s */
-
-inline __vector signed int
-vec_sum2s (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsum2sws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-/* vec_sums */
-
-inline __vector signed int
-vec_sums (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vsumsws ((__vector signed int) a1, (__vector signed int) a2);
-}
-
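-/* Illustrative sketch, not part of the original header: vec_sums
-   (vsumsws) adds all four words of a1 to word 3 of a2 and leaves the
-   saturated total in word 3 of the result, zeroing the other words;
-   vec_sum2s above does the analogous reduction over pairs of words:
-
-     __vector signed int v, total;
-     total = vec_sums (v, vec_sub (v, v));
-  */
-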
-/* vec_trunc */
-
-inline __vector float
-vec_trunc (__vector float a1)
-{
- return (__vector float) __builtin_altivec_vrfiz ((__vector float) a1);
-}
-
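-/* Illustrative sketch, not part of the original header: vec_trunc
-   (vrfiz) rounds each float toward zero, so 1.9f becomes 1.0f and
-   -1.9f becomes -1.0f:
-
-     __vector float v, t;
-     t = vec_trunc (v);
-  */
-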
-/* vec_unpackh */
-
-inline __vector signed short
-vec_unpackh (__vector signed char a1)
-{
- return (__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
-}
-
-inline __vector __bool short
-vec_unpackh (__vector __bool char a1)
-{
- return (__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
-}
-
-inline __vector signed int
-vec_unpackh (__vector signed short a1)
-{
- return (__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
-}
-
-inline __vector __bool int
-vec_unpackh (__vector __bool short a1)
-{
- return (__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
-}
-
-inline __vector unsigned int
-vec_unpackh (__vector __pixel a1)
-{
- return (__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) a1);
-}
-
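-/* Illustrative sketch, not part of the original header: vec_unpackh
-   sign-extends the high-order half of a1 into elements twice as wide
-   (vupkhsb/vupkhsh), while the __pixel form expands 1/5/5/5 pixels
-   into four 8-bit channels (vupkhpx); vec_unpackl, defined below,
-   treats the low-order half the same way:
-
-     __vector signed char bytes;
-     __vector signed short wide;
-     wide = vec_unpackh (bytes);
-  */
-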
-/* vec_vupkhsh */
-
-inline __vector __bool int
-vec_vupkhsh (__vector __bool short a1)
-{
- return (__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
-}
-
-inline __vector signed int
-vec_vupkhsh (__vector signed short a1)
-{
- return (__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
-}
-
-/* vec_vupkhpx */
-
-inline __vector unsigned int
-vec_vupkhpx (__vector __pixel a1)
-{
- return (__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) a1);
-}
-
-/* vec_vupkhsb */
-
-inline __vector __bool short
-vec_vupkhsb (__vector __bool char a1)
-{
- return (__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
-}
-
-inline __vector signed short
-vec_vupkhsb (__vector signed char a1)
-{
- return (__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
-}
-
-/* vec_unpackl */
-
-inline __vector signed short
-vec_unpackl (__vector signed char a1)
-{
- return (__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) a1);
-}
-
-inline __vector __bool short
-vec_unpackl (__vector __bool char a1)
-{
- return (__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) a1);
-}
-
-inline __vector unsigned int
-vec_unpackl (__vector __pixel a1)
-{
- return (__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) a1);
-}
-
-inline __vector signed int
-vec_unpackl (__vector signed short a1)
-{
- return (__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) a1);
-}
-
-inline __vector __bool int
-vec_unpackl (__vector __bool short a1)
-{
- return (__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) a1);
-}
-
-/* vec_vupklpx */
-
-inline __vector unsigned int
-vec_vupklpx (__vector __pixel a1)
-{
- return (__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) a1);
-}
-
-/* vec_vupklsh */
-
-inline __vector __bool int
-vec_vupklsh (__vector __bool short a1)
-{
- return (__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) a1);
-}
-
-inline __vector signed int
-vec_vupklsh (__vector signed short a1)
-{
- return (__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) a1);
-}
-
-/* vec_vupklsb */
-
-inline __vector __bool short
-vec_vupklsb (__vector __bool char a1)
-{
- return (__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) a1);
-}
-
-inline __vector signed short
-vec_vupklsb (__vector signed char a1)
-{
- return (__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) a1);
-}
-
-/* vec_xor */
-
-inline __vector float
-vec_xor (__vector float a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_xor (__vector float a1, __vector __bool int a2)
-{
- return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector float
-vec_xor (__vector __bool int a1, __vector float a2)
-{
- return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool int
-vec_xor (__vector __bool int a1, __vector __bool int a2)
-{
- return (__vector __bool int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_xor (__vector __bool int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_xor (__vector signed int a1, __vector __bool int a2)
-{
- return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed int
-vec_xor (__vector signed int a1, __vector signed int a2)
-{
- return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_xor (__vector __bool int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_xor (__vector unsigned int a1, __vector __bool int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned int
-vec_xor (__vector unsigned int a1, __vector unsigned int a2)
-{
- return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool short
-vec_xor (__vector __bool short a1, __vector __bool short a2)
-{
- return (__vector __bool short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_xor (__vector __bool short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_xor (__vector signed short a1, __vector __bool short a2)
-{
- return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed short
-vec_xor (__vector signed short a1, __vector signed short a2)
-{
- return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_xor (__vector __bool short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_xor (__vector unsigned short a1, __vector __bool short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned short
-vec_xor (__vector unsigned short a1, __vector unsigned short a2)
-{
- return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_xor (__vector __bool char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector __bool char
-vec_xor (__vector __bool char a1, __vector __bool char a2)
-{
- return (__vector __bool char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_xor (__vector signed char a1, __vector __bool char a2)
-{
- return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector signed char
-vec_xor (__vector signed char a1, __vector signed char a2)
-{
- return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_xor (__vector __bool char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_xor (__vector unsigned char a1, __vector __bool char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline __vector unsigned char
-vec_xor (__vector unsigned char a1, __vector unsigned char a2)
-{
- return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
-}
-
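-/* Illustrative sketch, not part of the original header: vec_xor is
-   the type-generic bitwise exclusive-or (vxor); XORing a register
-   with itself is the usual idiom for obtaining a zero vector:
-
-     __vector unsigned int v, zero;
-     zero = vec_xor (v, v);
-  */
-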
-/* vec_all_eq */
-
-inline int
-vec_all_eq (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_eq (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector __pixel a1, __vector __pixel a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_eq (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_eq (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a2);
-}
-
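-/* Illustrative sketch, not part of the original header: the
-   vec_all_* predicates compress a whole-vector comparison into one
-   scalar truth value read back through CR6; vec_all_eq is nonzero
-   only if every element pair compares equal:
-
-     __vector unsigned char x, y;
-     int same = vec_all_eq (x, y);
-  */
-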
-/* vec_all_ge */
-
-inline int
-vec_all_ge (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_ge (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_ge (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_ge (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_LT, a1, a2);
-}
-
-/* vec_all_gt */
-
-inline int
-vec_all_gt (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_gt (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_gt (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_gt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a1, a2);
-}
-
-/* vec_all_in */
-
-inline int
-vec_all_in (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2);
-}
-
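-/* Illustrative sketch, not part of the original header: vec_all_in
-   (vcmpbfp with the CR6 all-false test) is nonzero when every
-   element of a1 lies within the bounds [-a2, a2]; the elements of a2
-   are expected to be non-negative:
-
-     __vector float v, bound;
-     int inside = vec_all_in (v, bound);
-  */
-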
-/* vec_all_le */
-
-inline int
-vec_all_le (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_le (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_le (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_le (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_LT, a2, a1);
-}
-
-/* vec_all_lt */
-
-inline int
-vec_all_lt (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_all_lt (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_all_lt (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_all_lt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a2, a1);
-}
-
-/* vec_all_nan */
-
-inline int
-vec_all_nan (__vector float a1)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1);
-}
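-/* The self-comparison above is the usual IEEE NaN test: x == x fails
-   only for a NaN, so "vcmpeqfp (a1, a1) true in no lane" (__CR6_EQ)
-   holds exactly when every element of a1 is a NaN.  vec_all_numeric
-   below inverts this with __CR6_LT (true in every lane: no NaNs),
-   and vec_any_nan / vec_any_numeric use the corresponding *_REV
-   codes.  */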
-
-/* vec_all_ne */
-
-inline int
-vec_all_ne (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_all_ne (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector __pixel a1, __vector __pixel a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_all_ne (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_all_ne (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a2);
-}
-
-/* vec_all_nge */
-
-inline int
-vec_all_nge (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2);
-}
-
-/* vec_all_ngt */
-
-inline int
-vec_all_ngt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2);
-}
-
-/* vec_all_nle */
-
-inline int
-vec_all_nle (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1);
-}
-
-/* vec_all_nlt */
-
-inline int
-vec_all_nlt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1);
-}
-
-/* vec_all_numeric */
-
-inline int
-vec_all_numeric (__vector float a1)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1);
-}
-
-/* vec_any_eq */
-
-inline int
-vec_any_eq (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_eq (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector __pixel a1, __vector __pixel a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_eq (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_eq (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a2);
-}
-
-/* vec_any_ge */
-
-inline int
-vec_any_ge (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_ge (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_ge (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, a1, a2);
-}
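-/* Note the swapped operands in the integer vec_any_ge arms above:
-   "some lane has a1 >= a2" is computed as "not every lane has
-   a2 > a1", i.e. vcmpgt*_p with __CR6_LT_REV and the arguments
-   reversed.  Only the float arm can use vcmpgefp directly, since
-   AltiVec provides a >= compare for floats alone.  */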
-
-/* vec_any_gt */
-
-inline int
-vec_any_gt (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_gt (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_gt (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_gt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a1, a2);
-}
-
-/* vec_any_le */
-
-inline int
-vec_any_le (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_le (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_le (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_le (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, a2, a1);
-}
-
-/* vec_any_lt */
-
-inline int
-vec_any_lt (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
-}
-
-inline int
-vec_any_lt (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
-}
-
-inline int
-vec_any_lt (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
-}
-
-inline int
-vec_any_lt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a2, a1);
-}
-
-/* vec_any_nan */
-
-inline int
-vec_any_nan (__vector float a1)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1);
-}
-
-/* vec_any_ne */
-
-inline int
-vec_any_ne (__vector signed char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector signed char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool char a1, __vector __bool char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool char a1, __vector unsigned char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool char a1, __vector signed char a2)
-{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
-}
-
-inline int
-vec_any_ne (__vector signed short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector signed short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool short a1, __vector __bool short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool short a1, __vector unsigned short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool short a1, __vector signed short a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector __pixel a1, __vector __pixel a2)
-{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
-}
-
-inline int
-vec_any_ne (__vector signed int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector signed int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector unsigned int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool int a1, __vector __bool int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool int a1, __vector unsigned int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector __bool int a1, __vector signed int a2)
-{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
-}
-
-inline int
-vec_any_ne (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a2);
-}
-
-/* vec_any_nge */
-
-inline int
-vec_any_nge (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2);
-}
-
-/* vec_any_ngt */
-
-inline int
-vec_any_ngt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2);
-}
-
-/* vec_any_nle */
-
-inline int
-vec_any_nle (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1);
-}
-
-/* vec_any_nlt */
-
-inline int
-vec_any_nlt (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1);
-}
-
-/* vec_any_numeric */
-
-inline int
-vec_any_numeric (__vector float a1)
-{
- return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1);
-}
-
-/* vec_any_out */
-
-inline int
-vec_any_out (__vector float a1, __vector float a2)
-{
- return __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2);
-}
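-/* A reading of vec_any_out (semantics per the AltiVec PIM): vcmpbfp
-   is the bounds compare, testing each lane of a1 against the range
-   [-a2, +a2] and producing zero for in-bounds lanes; with
-   __CR6_EQ_REV the predicate answers "is at least one lane out of
-   bounds".  */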
-
-} /* extern "C++" */
-
-#else /* not C++ */
-
-/* "... and so I think no man in a century will suffer as greatly as
- you will." */
-
-/* Helper macros. */
-
-#define __un_args_eq(xtype, x) \
- __builtin_types_compatible_p (xtype, typeof (x))
-
-#define __bin_args_eq(xtype, x, ytype, y) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)))
-
-#define __tern_args_eq(xtype, x, ytype, y, ztype, z) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)) \
- && __builtin_types_compatible_p (ztype, typeof (z)))
-
-#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
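-/* __ch nests __builtin_choose_expr: the guard selects which arm
-   supplies the value and the type of the whole expression, which is
-   how the macros below overload on argument type in plain C.  Note
-   that every arm must still be a valid expression for every argument
-   type (only evaluation of the unchosen arms is suppressed), which
-   is why each builtin call casts its operands to a canonical signed
-   vector type.  */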
-
-#define vec_step(t) \
- __ch (__builtin_types_compatible_p (typeof (t), __vector signed int), 4, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned int), 4, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector __bool int), 4, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector signed short), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned short), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector __bool short), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector __pixel), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector signed char), 16, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned char), 16, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector __bool char), 16, \
- __ch (__builtin_types_compatible_p (typeof (t), __vector float), 4, \
- __builtin_altivec_compiletime_error ("vec_step"))))))))))))
-
-#define vec_abs(a) \
- __ch (__un_args_eq (__vector signed char, (a)), \
- ((__vector signed char) __builtin_altivec_abs_v16qi ((__vector signed char) (a))), \
- __ch (__un_args_eq (__vector signed short, (a)), \
- ((__vector signed short) __builtin_altivec_abs_v8hi ((__vector signed short) (a))), \
- __ch (__un_args_eq (__vector signed int, (a)), \
- ((__vector signed int) __builtin_altivec_abs_v4si ((__vector signed int) (a))), \
- __ch (__un_args_eq (__vector float, (a)), \
- ((__vector float) __builtin_altivec_abs_v4sf ((__vector float) (a))), \
- __builtin_altivec_compiletime_error ("vec_abs")))))
-
-#define vec_abss(a) \
- __ch (__un_args_eq (__vector signed char, (a)), \
- ((__vector signed char) __builtin_altivec_abss_v16qi ((__vector signed char) (a))), \
- __ch (__un_args_eq (__vector signed short, (a)), \
- ((__vector signed short) __builtin_altivec_abss_v8hi ((__vector signed short) (a))), \
- __ch (__un_args_eq (__vector signed int, (a)), \
- ((__vector signed int) __builtin_altivec_abss_v4si ((__vector signed int) (a))), \
- __builtin_altivec_compiletime_error ("vec_abss"))))
-
-#define vec_vaddubm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddubm")))))))
-
-#define vec_vadduhm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vadduhm")))))))
-
-#define vec_vadduwm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vadduwm")))))))
-
-#define vec_vaddfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vaddfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddfp"))
-
-#define vec_add(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vaddfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_add"))))))))))))))))))))
-
-#define vec_addc(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vaddcuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_addc"))
-
-#define vec_adds(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_adds")))))))))))))))))))
-
-#define vec_vaddsws(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddsws"))))
-
-#define vec_vadduws(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vadduws"))))
-
-#define vec_vaddshs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddshs"))))
-
-#define vec_vadduhs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vadduhs"))))
-
-#define vec_vaddsbs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddsbs"))))
-
-#define vec_vaddubs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vaddubs"))))
-
-#define vec_and(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
- ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_and")))))))))))))))))))))))))
-
-#define vec_andc(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
- ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_andc")))))))))))))))))))))))))
-
-#define vec_avg(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_avg")))))))
-
-#define vec_vavgsw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavgsw"))
-
-#define vec_vavguw(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavguw"))
-
-#define vec_vavgsh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavgsh"))
-
-#define vec_vavguh(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavguh"))
-
-#define vec_vavgsb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavgsb"))
-
-#define vec_vavgub(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vavgub"))
-
-#define vec_ceil(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrfip ((__vector float) (a1))), \
- __builtin_altivec_compiletime_error ("vec_ceil"))
-
-#define vec_cmpb(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector signed int) __builtin_altivec_vcmpbfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cmpb"))
-
-#define vec_cmpeq(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cmpeq"))))))))
-
-#define vec_vcmpeqfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpeqfp"))
-
-#define vec_vcmpequw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpequw")))
-
-#define vec_vcmpequh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpequh")))
-
-#define vec_vcmpequb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpequb")))
-
-#define vec_cmpge(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cmpge"))
-
-#define vec_cmpgt(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cmpgt"))))))))
-
-#define vec_vcmpgtfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtfp"))
-
-#define vec_vcmpgtsw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtsw"))
-
-#define vec_vcmpgtuw(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtuw"))
-
-#define vec_vcmpgtsh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtsh"))
-
-#define vec_vcmpgtuh(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtuh"))
-
-#define vec_vcmpgtsb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtsb"))
-
-#define vec_vcmpgtub(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcmpgtub"))
-
-#define vec_cmple(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) (a2), (__vector float) (a1))), \
- __builtin_altivec_compiletime_error ("vec_cmple"))
-
-#define vec_cmplt(a2, a1) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cmplt"))))))))
-
-#define vec_ctf(a1, a2) \
-__ch (__un_args_eq (__vector unsigned int, (a1)), \
- ((__vector float) __builtin_altivec_vcfux ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed int, (a1)), \
- ((__vector float) __builtin_altivec_vcfsx ((__vector signed int) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_ctf")))
-
-#define vec_vcfsx(a1, a2) \
-__ch (__un_args_eq (__vector signed int, (a1)), \
- ((__vector float) __builtin_altivec_vcfsx ((__vector signed int) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcfsx"))
-
-#define vec_vcfux(a1, a2) \
-__ch (__un_args_eq (__vector unsigned int, (a1)), \
- ((__vector float) __builtin_altivec_vcfux ((__vector signed int) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_vcfux"))
-
-#define vec_cts(a1, a2) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector signed int) __builtin_altivec_vctsxs ((__vector float) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_cts"))
-
-#define vec_ctu(a1, a2) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vctuxs ((__vector float) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_ctu"))
-
-#define vec_dss(a1) __builtin_altivec_dss ((const int) (a1));
+/* Be very liberal in the pairs we accept.  Mistakes such as passing
+   a `vector char' and `vector short' will be caught by the middle-end,
+   while any attempt to detect them here would produce hard-to-understand
+   error messages involving the implementation details of AltiVec.  */
+#define __altivec_binary_pred(NAME, CALL) \
+template <class T, class U> \
+typename __altivec_bool_ret <vec_step (T)>::__ret \
+NAME (T a1, U a2) \
+{ \
+ return CALL; \
+}
+
+__altivec_binary_pred(vec_cmplt,
+ __builtin_vec_cmpgt (a2, a1))
+__altivec_binary_pred(vec_cmple,
+ __builtin_altivec_cmpge (a2, a1))
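+
+/* A minimal usage sketch, assuming -maltivec and the template
+   definitions above (the values are illustrative only):
+
+     vector float a = { 1.0f, 2.0f, 3.0f, 4.0f };
+     vector float b = { 4.0f, 3.0f, 2.0f, 1.0f };
+     vector bool int lt = vec_cmplt (a, b);    element-wise a < b
+     vector bool int le = vec_cmple (a, b);    element-wise a <= b
+
+   The return type is deduced through __altivec_bool_ret, keyed on
+   vec_step (T), so each operand pair yields the bool vector of
+   matching element width.  */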
+
+__altivec_scalar_pred(vec_all_in,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_out,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2))
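+
+/* A minimal sketch of the bounds predicates, assuming -maltivec; per
+   the AltiVec PIM, vcmpbfp tests each a[i] against [-b[i], b[i]]:
+
+     vector float v = { 0.5f, -0.25f, 0.75f, -1.0f };
+     vector float bound = { 1.0f, 1.0f, 1.0f, 1.0f };
+     vec_all_in (v, bound)    nonzero: every v[i] lies in [-1.0, 1.0]
+     vec_any_out (v, bound)   zero: no element falls outside
+*/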
+
+__altivec_unary_pred(vec_all_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1))
+__altivec_unary_pred(vec_any_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1))
+
+__altivec_unary_pred(vec_all_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1))
+__altivec_unary_pred(vec_any_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1))
+
+__altivec_scalar_pred(vec_all_eq,
+ __builtin_vec_vcmpeq_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_ne,
+ __builtin_vec_vcmpeq_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_eq,
+ __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_ne,
+ __builtin_vec_vcmpeq_p (__CR6_LT_REV, a1, a2))
+
+__altivec_scalar_pred(vec_all_gt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_lt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_any_gt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_lt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a2, a1))
+
+__altivec_scalar_pred(vec_all_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1))
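+
+/* The "n" predicates test "not greater/less than", which is not the
+   same as the complementary ordered compare once NaNs are involved;
+   a sketch, assuming -maltivec:
+
+     vector float n = { __builtin_nanf (""), 0.0f, 0.0f, 0.0f };
+     vector float z = { 0.0f, 0.0f, 0.0f, 0.0f };
+     vec_any_ngt (n, z)   nonzero: NaN is not greater than 0.0
+     vec_any_gt (n, z)    zero: no element compares greater
+*/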
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types;
+   for integer types it is converted to __builtin_vec_vcmpgt_p with the
+   arguments and condition code inverted.  */
+__altivec_scalar_pred(vec_all_le,
+ __builtin_vec_vcmpge_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_all_ge,
+ __builtin_vec_vcmpge_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_any_le,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a2, a1))
+__altivec_scalar_pred(vec_any_ge,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a1, a2))
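+
+/* A minimal sketch, assuming -maltivec: the *_p builtins emit the
+   record (dot) forms of the compares and read the result back from
+   CR6, so these predicates return a plain int usable in conditions:
+
+     vector signed int x = { 1, 2, 3, 4 };
+     vector signed int y = { 0, 2, 3, 5 };
+     if (vec_any_lt (x, y))   taken: x[3] < y[3]
+       ;
+     int all = vec_all_ge (x, y);   0, again because x[3] < y[3]
+*/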
+
+__altivec_scalar_pred(vec_all_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1))
+
+#undef __altivec_scalar_pred
+#undef __altivec_unary_pred
+#undef __altivec_binary_pred
+#else
+#define vec_cmplt(a1, a2) __builtin_vec_cmpgt ((a2), (a1))
+#define vec_cmple(a1, a2) __builtin_altivec_vcmpgefp ((a2), (a1))
+
+#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2))
+#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2))
+
+#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1))
+#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1))
+
+#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1))
+#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1))
+
+#define vec_all_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT, (a1), (a2))
+#define vec_all_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ, (a1), (a2))
+#define vec_any_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT_REV, (a1), (a2))
+
+#define vec_all_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a1), (a2))
+#define vec_all_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a2), (a1))
+#define vec_any_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a2), (a1))
+
+#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1))
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types;
+   for integer types it is converted to __builtin_vec_vcmpgt_p with the
+   arguments and condition code inverted.  */
+#define vec_all_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a2), (a1))
+#define vec_all_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a1), (a2))
+#define vec_any_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a2), (a1))
+#define vec_any_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a1), (a2))
+
+#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1))
+#endif
+/* These do not accept vectors, so they have no __builtin_vec_*
+   counterparts.  */
+#define vec_dss(x) __builtin_altivec_dss ((x))
#define vec_dssall() __builtin_altivec_dssall ()
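
/* A minimal sketch of the data-stream controls, assuming -maltivec and
   the vec_dst touch macro defined elsewhere in this header (ctl is a
   hypothetical control word packing block size, count and stride):

     static const float buf[1024];
     int ctl = 0;
     vec_dst (buf, ctl, 0);   start a data-stream touch with tag 0
     vec_dss (0);             stop the stream tagged 0
     vec_dssall ();           stop every active stream  */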
-
-#define vec_dst(a1, a2, a3) \
-__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed char, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool char, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed short, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool short, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __pixel, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed int, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool int, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector float, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned char, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const signed char, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned short, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const short, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned int, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const int, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned long, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const long, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const float, *(a1)), \
- __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
- __builtin_altivec_compiletime_error ("vec_dst")))))))))))))))))))))
-
-#define vec_dstst(a1, a2, a3) \
-__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed char, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool char, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed short, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool short, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __pixel, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed int, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool int, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector float, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned char, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const signed char, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned short, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const short, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned int, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const int, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned long, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const long, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const float, *(a1)), \
- __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
- __builtin_altivec_compiletime_error ("vec_dstst")))))))))))))))))))))
-
-#define vec_dststt(a1, a2, a3) \
-__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed char, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool char, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed short, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool short, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __pixel, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed int, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool int, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector float, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned char, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const signed char, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned short, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const short, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned int, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const int, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned long, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const long, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const float, *(a1)), \
- __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
- __builtin_altivec_compiletime_error ("vec_dststt")))))))))))))))))))))
-
-#define vec_dstt(a1, a2, a3) \
-__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed char, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool char, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed short, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool short, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __pixel, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector signed int, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector __bool int, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const __vector float, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned char, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const signed char, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned short, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const short, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned int, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const int, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const unsigned long, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const long, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (const float, *(a1)), \
- __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
- __builtin_altivec_compiletime_error ("vec_dstt")))))))))))))))))))))
-
-#define vec_expte(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vexptefp ((__vector float) (a1))), \
- __builtin_altivec_compiletime_error ("vec_expte"))
-
-#define vec_floor(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrfim ((__vector float) (a1))), \
- __builtin_altivec_compiletime_error ("vec_floor"))
-
-#define vec_ld(a, b) \
-__ch (__un_args_eq (const __vector unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool char, *(b)), \
- ((__vector __bool char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector signed short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool short, *(b)), \
- ((__vector __bool short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector __pixel, *(b)), \
- ((__vector __pixel) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const unsigned long, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector signed int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const long, *(b)), \
- ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool int, *(b)), \
- ((__vector __bool int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const __vector float, *(b)), \
- ((__vector float) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (const float, *(b)), \
- ((__vector float) __builtin_altivec_lvx ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_ld")))))))))))))))))))))
-
-#define vec_lde(a, b) \
-__ch (__un_args_eq (const unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
-__ch (__un_args_eq (const signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvebx ((a), (b))), \
-__ch (__un_args_eq (const unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
-__ch (__un_args_eq (const short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvehx ((a), (b))), \
-__ch (__un_args_eq (const unsigned long, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (const long, *(b)), \
- ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (const unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (const int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (const float, *(b)), \
- ((__vector float) __builtin_altivec_lvewx ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_lde"))))))))))
-
-#define vec_lvewx(a, b) \
-__ch (__un_args_eq (unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (signed int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (unsigned long, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (signed long, *(b)), \
- ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (float, *(b)), \
- ((__vector float) __builtin_altivec_lvewx ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_lvewx"))))))
-
-#define vec_lvehx(a, b) \
-__ch (__un_args_eq (unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
-__ch (__un_args_eq (signed short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvehx ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_lvehx")))
-
-#define vec_lvebx(a, b) \
-__ch (__un_args_eq (unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
-__ch (__un_args_eq (signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvebx ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_lvebx")))
-
-#define vec_ldl(a, b) \
-__ch (__un_args_eq (const __vector unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const unsigned char, *(b)), \
- ((__vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const signed char, *(b)), \
- ((__vector signed char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool char, *(b)), \
- ((__vector __bool char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const unsigned short, *(b)), \
- ((__vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector signed short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const short, *(b)), \
- ((__vector signed short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool short, *(b)), \
- ((__vector __bool short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector __pixel, *(b)), \
- ((__vector __pixel) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const unsigned int, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const unsigned long, *(b)), \
- ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector signed int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const int, *(b)), \
- ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const long, *(b)), \
- ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector __bool int, *(b)), \
- ((__vector __bool int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const __vector float, *(b)), \
- ((__vector float) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (const float, *(b)), \
- ((__vector float) __builtin_altivec_lvxl ((a), (b))), \
-__builtin_altivec_compiletime_error ("vec_ldl")))))))))))))))))))))
-
-#define vec_loge(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vlogefp ((__vector float) (a1))), \
- __builtin_altivec_compiletime_error ("vec_loge"))
-
-#define vec_lvsl(a1, a2) \
-__ch (__un_args_eq (const volatile unsigned char, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed char, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned short, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed short, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned int, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed int, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned long, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed long, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile float, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__builtin_altivec_compiletime_error ("vec_lvsl"))))))))))
-
-#define vec_lvsr(a1, a2) \
-__ch (__un_args_eq (const volatile unsigned char, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed char, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned short, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed short, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned int, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed int, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile unsigned long, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile signed long, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (const volatile float, *(a2)), \
- ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__builtin_altivec_compiletime_error ("vec_lvsr"))))))))))
-
-#define vec_madd(a1, a2, a3) \
-__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector float, (a3)), \
- ((__vector float) __builtin_altivec_vmaddfp ((a1), (a2), (a3))), \
-__builtin_altivec_compiletime_error ("vec_madd"))
-
-#define vec_madds(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
- ((__vector signed short) __builtin_altivec_vmhaddshs ((a1), (a2), (a3))), \
-__builtin_altivec_compiletime_error ("vec_madds"))
-
-#define vec_max(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmaxfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_max"))))))))))))))))))))
-
-#define vec_vmaxfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmaxfp ((__vector float) (a1), (__vector float) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxfp"))
-
-#define vec_vmaxsw(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxsw"))))
-
-#define vec_vmaxuw(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxuw"))))
-
-#define vec_vmaxsh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxsh"))))
-
-#define vec_vmaxuh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxuh"))))
-
-#define vec_vmaxsb(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxsb"))))
-
-#define vec_vmaxub(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmaxub"))))
-
-#define vec_mergeh(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- ((__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mergeh"))))))))))))
-
-#define vec_vmrghw(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrghw")))))
-
-#define vec_vmrghh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- ((__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrghh")))))
-
-#define vec_vmrghb(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrghb"))))
-
-#define vec_mergel(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- ((__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mergel"))))))))))))
-
-#define vec_vmrglw(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrglw")))))
-
-#define vec_vmrglh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- ((__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrglh")))))
-
-#define vec_vmrglb(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmrglb"))))
-
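-/* vec_mfvscr: read the Vector Status and Control Register.  */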
-#define vec_mfvscr() (((__vector unsigned short) __builtin_altivec_mfvscr ()))
-
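-/* vec_min: element-wise minimum; the signed, unsigned or floating-point
-   builtin is selected from the argument types, with __bool operands
-   taking the signedness of the other operand.  */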
-#define vec_min(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vminfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_min"))))))))))))))))))))
-
-#define vec_vminfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vminfp ((__vector float) (a1), (__vector float) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminfp"))
-
-#define vec_vminsw(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminsw"))))
-
-#define vec_vminuw(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminuw"))))
-
-#define vec_vminsh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminsh"))))
-
-#define vec_vminuh(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminuh"))))
-
-#define vec_vminsb(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_minsb"))))
-
-#define vec_vminub(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vminub"))))
-
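-/* vec_mladd: halfword multiply-low and add, (a1 * a2 + a3) modulo 2**16
-   in each element.  */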
-#define vec_mladd(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
- ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
- ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
- ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
- ((__vector unsigned short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
- __builtin_altivec_compiletime_error ("vec_mladd")))))
-
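-/* vec_mradds: multiply-high with rounding, then add with saturation
-   (vmhraddshs).  */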
-#define vec_mradds(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
- ((__vector signed short) __builtin_altivec_vmhraddshs ((a1), (a2), (a3))), \
-__builtin_altivec_compiletime_error ("vec_mradds"))
-
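-/* vec_msum: multiply-sum; the products of the narrow elements within
-   each word are summed into the corresponding word of a3 using modulo
-   arithmetic.  */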
-#define vec_msum(a1, a2, a3) \
-__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed char, (a1), __vector unsigned char, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
- __builtin_altivec_compiletime_error ("vec_msum")))))
-
-#define vec_vmsumshm(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsumshm"))
-
-#define vec_vmsumuhm(a1, a2, a3) \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsumuhm"))
-
-#define vec_vmsummbm(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed char, (a1), __vector unsigned char, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsummbm"))
-
-#define vec_vmsumubm(a1, a2, a3) \
-__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsummbm"))
-
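-/* vec_msums: as vec_msum for halfword operands, but the final addition
-   saturates.  */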
-#define vec_msums(a1, a2, a3) \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
- __builtin_altivec_compiletime_error ("vec_msums")))
-
-#define vec_vmsumshs(a1, a2, a3) \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
- ((__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsumshs"))
-
-#define vec_vmsumuhs(a1, a2, a3) \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
-__builtin_altivec_compiletime_error ("vec_vmsumuhs"))
-
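-/* vec_mtvscr: write the Vector Status and Control Register; any vector
-   type is accepted and reinterpreted as vector signed int.  */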
-#define vec_mtvscr(a1) \
-__ch (__un_args_eq (__vector signed int, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector unsigned int, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector __bool int, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector unsigned short, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector unsigned char, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
- __builtin_altivec_compiletime_error ("vec_mtvscr")))))))))))
-
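-/* vec_mule: multiply the even-numbered elements, producing results of
-   twice the element width.  */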
-#define vec_mule(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mule")))))
-
-#define vec_vmulesh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmulesh"))
-
-#define vec_vmuleuh(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmuleuh"))
-
-#define vec_vmulesb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmulesb"))
-
-#define vec_vmuleub(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmuleub"))
-
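-/* vec_mulo: multiply the odd-numbered elements, producing results of
-   twice the element width.  */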
-#define vec_mulo(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) (a1), (__vector signed short) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mulo")))))
-
-#define vec_vmulosh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmulosh"))
-
-#define vec_vmulouh(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmulouh"))
-
-#define vec_vmulosb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmulosb"))
-
-#define vec_vmuloub(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vmuloub"))
-
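-/* vec_nmsub: negative multiply-subtract, -(a1 * a2 - a3).  */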
-#define vec_nmsub(a1, a2, a3) \
-__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector float, (a3)), \
- ((__vector float) __builtin_altivec_vnmsubfp ((__vector float) (a1), (__vector float) (a2), (__vector float) (a3))), \
- __builtin_altivec_compiletime_error ("vec_nmsub"))
-
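-/* vec_nor: bitwise NOR, ~(a1 | a2), for any matching vector types.  */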
-#define vec_nor(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_nor")))))))))))
-
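-/* vec_or: bitwise OR; mixed __bool/non-__bool operand pairs yield the
-   non-__bool type.  */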
-#define vec_or(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
- ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_or")))))))))))))))))))))))))
-
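-/* vec_pack: narrow each element to half its width, keeping the
-   low-order bits (modulo pack), and concatenate the two results.  */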
-#define vec_pack(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_pack")))))))
-
-#define vec_vpkuwum(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkuwum"))))
-
-#define vec_vpkuhum(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkuhum"))))
-
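-/* vec_packpx: pack 32-bit pixels into 16-bit 1/5/5/5 pixels (vpkpx).  */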
-#define vec_packpx(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector __pixel) __builtin_altivec_vpkpx ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_packpx"))
-
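-/* vec_packs: narrowing pack with saturation; signed operands saturate
-   to the signed range, unsigned operands to the unsigned range.  */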
-#define vec_packs(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_packs")))))
-
-#define vec_vpkswss(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkswss"))
-
-#define vec_vpkuwus(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkuwus"))
-
-#define vec_vpkshss(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkshss"))
-
-#define vec_vpkuhus(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkuhus"))
-
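-/* vec_packsu: narrowing pack with unsigned saturation; the result is
-   unsigned for both signed and unsigned operands.  */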
-#define vec_packsu(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_packsu")))))
-
-#define vec_vpkswus(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkswus"))
-
-#define vec_vpkshus(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vpkshus"))
-
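-/* vec_perm: each result byte is chosen from the 32-byte concatenation
-   of a1 and a2 by the low five bits of the corresponding byte of a3.  */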
-#define vec_perm(a1, a2, a3) \
-__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector unsigned char, (a3)), \
- ((__vector float) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector unsigned char, (a3)), \
- ((__vector signed int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector unsigned char, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector unsigned char, (a3)), \
- ((__vector __bool int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector unsigned char, (a3)), \
- ((__vector signed short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned char, (a3)), \
- ((__vector unsigned short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector unsigned char, (a3)), \
- ((__vector __bool short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector __pixel, (a1), __vector __pixel, (a2), __vector unsigned char, (a3)), \
- ((__vector __pixel) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector unsigned char, (a3)), \
- ((__vector signed char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned char, (a3)), \
- ((__vector unsigned char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
-__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector unsigned char, (a3)), \
- ((__vector __bool char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
- __builtin_altivec_compiletime_error ("vec_perm"))))))))))))
-
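-/* vec_re: floating-point reciprocal estimate (vrefp).  */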
-#define vec_re(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrefp ((__vector float) (a1))), \
-__builtin_altivec_compiletime_error ("vec_re"))
-
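-/* vec_rl: rotate each element left by the corresponding element of a2,
-   taken modulo the element width in bits.  */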
-#define vec_rl(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_rl")))))))
-
-#define vec_vrlw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vrlw")))
-
-#define vec_vrlh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vrlh")))
-
-#define vec_vrlb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vrlb")))
-
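-/* vec_round: round each element to the nearest integral value
-   (vrfin).  */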
-#define vec_round(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrfin ((__vector float) (a1))), \
-__builtin_altivec_compiletime_error ("vec_round"))
-
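-/* vec_rsqrte: floating-point reciprocal square-root estimate
-   (vrsqrtefp).  */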
-#define vec_rsqrte(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrsqrtefp ((__vector float) (a1))), \
-__builtin_altivec_compiletime_error ("vec_rsqrte"))
-
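-/* vec_sel: bitwise select, (a1 & ~a3) | (a2 & a3); result bits come
-   from a2 where a3 is set and from a1 where it is clear.  */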
-#define vec_sel(a1, a2, a3) \
-__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector __bool int, (a3)), \
- ((__vector float) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector unsigned int, (a3)), \
- ((__vector float) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector __bool int, (a3)), \
- ((__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector unsigned int, (a3)), \
- ((__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector __bool int, (a3)), \
- ((__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector unsigned int, (a3)), \
- ((__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector __bool int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector unsigned int, (a3)), \
- ((__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector __bool short, (a3)), \
- ((__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector unsigned short, (a3)), \
- ((__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector __bool short, (a3)), \
- ((__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector unsigned short, (a3)), \
- ((__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector __bool short, (a3)), \
- ((__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
- ((__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector __bool char, (a3)), \
- ((__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector unsigned char, (a3)), \
- ((__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector __bool char, (a3)), \
- ((__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector unsigned char, (a3)), \
- ((__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector __bool char, (a3)), \
- ((__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
-__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned char, (a3)), \
- ((__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
- __builtin_altivec_compiletime_error ("vec_sel")))))))))))))))))))))
-
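-/* vec_sl: shift each element left by the corresponding element of a2,
-   taken modulo the element width in bits.  */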
-#define vec_sl(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sl")))))))
-
-#define vec_vslw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vslw")))
-
-#define vec_vslh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vslh")))
-
-#define vec_vslb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vslb")))
-
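-/* vec_sld: shift left double; concatenate a1 and a2 and take the 16
-   bytes starting at byte offset a3, a constant in the range 0..15.  */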
-#define vec_sld(a1, a2, a3) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
- __builtin_altivec_compiletime_error ("vec_sld"))))))))))))
-
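-/* vec_sll: shift the entire 128-bit vector left by 0-7 bits; the count
-   comes from the three low-order bits of a2 and the result is well
-   defined only when every byte of a2 holds the same count.  */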
-#define vec_sll(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned short, (a2)), \
- ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
- ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned int, (a2)), \
- ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
- ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned int, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned short, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned int, (a2)), \
- ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned short, (a2)), \
- ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sll")))))))))))))))))))))))))))))))
-
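-/* vec_slo: shift the entire vector left by 0-15 whole bytes; the count,
-   in bytes, is taken from bits 121:124 of a2 (vslo).  */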
-#define vec_slo(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector signed char, (a2)), \
- ((__vector float) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector unsigned char, (a2)), \
- ((__vector float) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed char, (a2)), \
- ((__vector signed int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
- ((__vector signed int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector signed char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
- ((__vector signed short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector signed char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector signed char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector signed char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_slo")))))))))))))))))
-
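/* Editor's sketch, not part of the original header: vec_sll expands to
   the vsl builtin, which shifts the entire 128-bit register left by a
   bit count taken from the low-order three bits of the second operand
   (the count should be splatted across all elements for a defined
   result), whereas vec_slo expands to vslo and shifts left by whole
   bytes.  A minimal, hypothetical helper using only macros from this
   header: */
static __vector unsigned char
example_sll (__vector unsigned char v)
{
  __vector unsigned char sh = vec_splat_u8 (3);   /* splat the bit count */
  return vec_sll (v, sh);   /* whole 128-bit value shifted left by 3 bits */
}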
-#define vec_splat(a1, a2) \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned char, (a1)), \
- ((__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned short, (a1)), \
- ((__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed int, (a1)), \
- ((__vector signed int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned int, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __bool int, (a1)), \
- ((__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_splat"))))))))))))
-
-#define vec_vspltw(a1, a2) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __bool int, (a1)), \
- ((__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed int, (a1)), \
- ((__vector signed int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned int, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vspltw")))))
-
-#define vec_vsplth(a1, a2) \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned short, (a1)), \
- ((__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsplth")))))
-
-#define vec_vspltb(a1, a2) \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__ch (__un_args_eq (__vector unsigned char, (a1)), \
- ((__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vspltb"))))
-
-#define vec_splat_s8(a1) ((__vector signed char) __builtin_altivec_vspltisb (a1))
-
-#define vec_splat_s16(a1) ((__vector signed short) __builtin_altivec_vspltish (a1))
-
-#define vec_splat_s32(a1) ((__vector signed int) __builtin_altivec_vspltisw (a1))
-
-#define vec_splat_u8(a1) ((__vector unsigned char) __builtin_altivec_vspltisb (a1))
-
-#define vec_splat_u16(a1) ((__vector unsigned short) __builtin_altivec_vspltish (a1))
-
-#define vec_splat_u32(a1) ((__vector unsigned int) __builtin_altivec_vspltisw (a1))
-
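/* Editor's sketch, not part of the original header: vec_splat copies
   the element selected by a small literal index across the whole vector
   via vspltb/vsplth/vspltw, while the immediate forms above expand to
   vspltisb, vspltish and vspltisw, so their argument must be a
   compile-time constant in the 5-bit signed range -16..15.  A
   hypothetical example: */
static __vector signed short
example_splat (void)
{
  return vec_splat_s16 (7);   /* yields { 7, 7, 7, 7, 7, 7, 7, 7 } */
}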
-#define vec_sr(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sr")))))))
-
-#define vec_vsrw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsrw")))
-
-#define vec_vsrh(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsrh")))
-
-#define vec_vsrb(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsrb")))
-
-#define vec_sra(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sra")))))))
-
-#define vec_vsraw(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsraw")))
-
-#define vec_vsrah(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsrah")))
-
-#define vec_vsrab(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsrab")))
-
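/* Editor's sketch, not part of the original header: vec_sr performs a
   per-element logical shift right (vsrb/vsrh/vsrw) and vec_sra an
   arithmetic one that replicates the sign bit (vsrab/vsrah/vsraw); in
   both cases each element's shift count is taken modulo the element
   width in bits.  A hypothetical example: */
static __vector signed int
example_sra (__vector signed int v)
{
  __vector unsigned int two = vec_splat_u32 (2);
  return vec_sra (v, two);   /* arithmetic: an element holding -8 becomes -2 */
}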
-#define vec_srl(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned short, (a2)), \
- ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
- ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned int, (a2)), \
- ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
- ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned int, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned short, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned int, (a2)), \
- ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned short, (a2)), \
- ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned int, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned short, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_srl")))))))))))))))))))))))))))))))
-
-#define vec_sro(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector signed char, (a2)), \
- ((__vector float) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector unsigned char, (a2)), \
- ((__vector float) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed char, (a2)), \
- ((__vector signed int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
- ((__vector signed int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector signed char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed char, (a2)), \
- ((__vector signed short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
- ((__vector signed short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector signed char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
- ((__vector __pixel) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector signed char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector signed char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sro")))))))))))))))))
-
-#define vec_st(a1, a2, a3) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), unsigned char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed char, (a1), signed char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), unsigned char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), signed char, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed short, (a1), short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), short, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), unsigned int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed int, (a1), int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), unsigned int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), int, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector float, (a1), float, *(a3)), \
- __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__builtin_altivec_compiletime_error ("vec_st")))))))))))))))))))))))))))
-
-#define vec_stl(a1, a2, a3) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), unsigned char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed char, (a1), signed char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), unsigned char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), signed char, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed short, (a1), short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), unsigned short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), short, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), unsigned int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector signed int, (a1), int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), unsigned int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), int, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__ch (__bin_args_eq (__vector float, (a1), float, *(a3)), \
- __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
-__builtin_altivec_compiletime_error ("vec_stl")))))))))))))))))))))))))))
-
-#define vec_ste(a, b, c) \
-__ch (__bin_args_eq (__vector unsigned char, (a), unsigned char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector signed char, (a), signed char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool char, (a), unsigned char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool char, (a), signed char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector unsigned short, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector signed short, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool short, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool short, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __pixel, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __pixel, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector unsigned int, (a), unsigned int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector signed int, (a), int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool int, (a), unsigned int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector __bool int, (a), int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
-__ch (__bin_args_eq (__vector float, (a), float, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
- __builtin_altivec_compiletime_error ("vec_ste"))))))))))))))))
-
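/* Editor's sketch, not part of the original header: vec_st expands to
   stvx, which stores the full 16-byte vector at the effective address
   (a2 + a3) with its low four bits ignored, so the target should be
   16-byte aligned; vec_ste selects stvebx/stvehx/stvewx by element type
   and stores the single element addressed by the (element-aligned)
   effective address.  A hypothetical example: */
static void
example_store (__vector float v, float *buf)   /* buf assumed 16-byte aligned */
{
  vec_st (v, 0, buf);    /* whole vector to buf[0..3] */
  vec_ste (v, 4, buf);   /* element 1 of v to buf + 4 bytes, i.e. buf[1] */
}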
-#define vec_stvewx(a, b, c) \
-__ch (__bin_args_eq (__vector unsigned int, (a), unsigned int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector signed int, (a), int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool int, (a), unsigned int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool int, (a), int, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector float, (a), float, *(c)), \
- __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
-__builtin_altivec_compiletime_error ("vec_stvewx"))))))
-
-#define vec_stvehx(a, b, c) \
-__ch (__bin_args_eq (__vector unsigned short, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector signed short, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool short, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool short, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __pixel, (a), unsigned short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __pixel, (a), short, *(c)), \
- __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
-__builtin_altivec_compiletime_error ("vec_stvehx")))))))
-
-#define vec_stvebx(a, b, c) \
-__ch (__bin_args_eq (__vector unsigned char, (a), unsigned char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector signed char, (a), signed char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool char, (a), unsigned char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
-__ch (__bin_args_eq (__vector __bool char, (a), signed char, *(c)), \
- __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
-__builtin_altivec_compiletime_error ("vec_stvebx")))))
-
-#define vec_sub(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vsubfp ((__vector float) (a1), (__vector float) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sub"))))))))))))))))))))
-
-#define vec_vsubfp(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vsubfp ((__vector float) (a1), (__vector float) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubfp"))
-
-#define vec_vsubuwm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubuwm")))))))
-
-#define vec_vsubuhm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubuhm")))))))
-
-#define vec_vsububm(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsububm")))))))
-
-#define vec_subc(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubcuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_subc"))
-
-#define vec_subs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_subs")))))))))))))))))))
-
-#define vec_vsubsws(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubsws"))))
-
-#define vec_vsubuws(a1, a2) \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubuws"))))
-
-#define vec_vsubshs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubshs"))))
-
-#define vec_vsubuhs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubuhs"))))
-
-#define vec_vsubsbs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsubsbs"))))
-
-#define vec_vsububs(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsububs"))))
-
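/* Editor's sketch, not part of the original header: vec_sub maps to the
   modulo forms vsububm/vsubuhm/vsubuwm (and vsubfp for float), so
   integer results wrap; vec_subs maps to the saturating forms, and
   vec_subc (vsubcuw) returns the per-element carry out of an unsigned
   32-bit subtraction.  A hypothetical example: */
static __vector unsigned char
example_subs (__vector unsigned char a, __vector unsigned char b)
{
  return vec_subs (a, b);   /* saturating: clamps at 0 instead of wrapping */
}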
-#define vec_sum4s(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_sum4s"))))
-
-#define vec_vsum4shs(a1, a2) \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsum4shs"))
-
-#define vec_vsum4sbs(a1, a2) \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsum4sbs"))
-
-#define vec_vsum4ubs(a1, a2) \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_vsum4ubs"))
-
-#define vec_sum2s(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsum2sws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_sum2s"))
-
-#define vec_sums(a1, a2) \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vsumsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__builtin_altivec_compiletime_error ("vec_sums"))
-
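/* Editor's sketch, not part of the original header: the sum-across
   forms add horizontal sums of a1's elements to elements of a2:
   vec_sum4s yields four partial sums, vec_sum2s two, and vec_sums
   (vsumsws) one saturated total placed in the last word of the result.
   A hypothetical example (big-endian element order, as on the PowerPC
   targets this header supports): */
static int
example_sums (__vector signed int v)
{
  union { __vector signed int v; int e[4]; } u;
  u.v = vec_sums (v, vec_splat_s32 (0));
  return u.e[3];   /* vsumsws leaves the total in element 3 */
}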
-#define vec_trunc(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- ((__vector float) __builtin_altivec_vrfiz ((__vector float) (a1))), \
-__builtin_altivec_compiletime_error ("vec_trunc"))
-
-#define vec_unpackh(a1) \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
- __builtin_altivec_compiletime_error ("vec_unpackh"))))))
-
-#define vec_vupkhsh(a1) \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupkhsh")))
-
-#define vec_vupkhpx(a1) \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupkhpx"))
-
-#define vec_vupkhsb(a1) \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupkhsb")))
-
-#define vec_unpackl(a1) \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
- __builtin_altivec_compiletime_error ("vec_unpackl"))))))
-
-#define vec_vupklsh(a1) \
-__ch (__un_args_eq (__vector __bool short, (a1)), \
- ((__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
-__ch (__un_args_eq (__vector signed short, (a1)), \
- ((__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupklsh")))
-
-#define vec_vupklpx(a1) \
-__ch (__un_args_eq (__vector __pixel, (a1)), \
- ((__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupklpx"))
-
-#define vec_vupklsb(a1) \
-__ch (__un_args_eq (__vector __bool char, (a1)), \
- ((__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
-__ch (__un_args_eq (__vector signed char, (a1)), \
- ((__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
-__builtin_altivec_compiletime_error ("vec_vupklsb")))
-
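/* Editor's sketch, not part of the original header: vec_unpackh and
   vec_unpackl sign-extend the high or low half of the source elements
   to the next wider type (vupkhsb/vupklsb for char to short,
   vupkhsh/vupklsh for short to int), and the __pixel variants
   (vupkhpx/vupklpx) expand 1:5:5:5 pixels to 8:8:8:8 words.  A
   hypothetical example: */
static __vector signed int
example_unpackh (__vector signed short v)
{
  return vec_unpackh (v);   /* sign-extend the four high halfwords */
}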
-#define vec_xor(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
- ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
- ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- ((__vector __bool int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- ((__vector __bool short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- ((__vector __bool char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_xor")))))))))))))))))))))))))
-
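The long dispatch chain above only selects the result type; every arm lowers to the same vxor instruction. A minimal sketch, assuming -maltivec:

    #include <altivec.h>

    /* Mixing a __bool mask with unsigned data yields the unsigned type,
       matching the (__vector unsigned int) arm of the macro above.  */
    __vector unsigned int toggle (__vector unsigned int v,
                                  __vector __bool int m)
    {
      return vec_xor (v, m);
    }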
-/* Predicates. */
-
-#define vec_all_eq(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_eq"))))))))))))))))))))))))
-
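Unlike the element-wise compare intrinsics, the vec_all_*/vec_any_* predicates collapse the comparison through CR6 and return a plain int. A minimal sketch, assuming -maltivec:

    #include <altivec.h>

    int all_bytes_equal (__vector unsigned char a, __vector unsigned char b)
    {
      return vec_all_eq (a, b);  /* 1 iff every element pair is equal */
    }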
-#define vec_all_ge(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_ge"))))))))))))))))))))
-
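Note the swapped operands in the integer cases above: "all a >= b" is tested as "no element of b is greater than the matching element of a", hence vcmpgt*_p with the all-false condition __CR6_EQ. A sketch of the resulting identity, assuming -maltivec:

    #include <altivec.h>
    #include <assert.h>

    void all_ge_identity (__vector unsigned char a, __vector unsigned char b)
    {
      /* Both sides compile to the same vcmpgtub comparison.  */
      assert (vec_all_ge (a, b) == !vec_any_gt (b, a));
    }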
-#define vec_all_gt(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_gt"))))))))))))))))))))
-
-#define vec_all_in(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_in"))
-
-#define vec_all_le(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT, (__vector float) (a2), (__vector float) (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_le"))))))))))))))))))))
-
-#define vec_all_lt(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT, (__vector float) (a2), (__vector float) (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_lt"))))))))))))))))))))
-
-#define vec_all_nan(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_nan"))
-
-#define vec_all_ne(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_ne"))))))))))))))))))))))))
-
-#define vec_all_nge(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_nge"))
-
-#define vec_all_ngt(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_all_ngt"))
-
-#define vec_all_nle(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_nle"))
-
-#define vec_all_nlt(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_nlt"))
-
-#define vec_all_numeric(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1)), \
- __builtin_altivec_compiletime_error ("vec_all_numeric"))
-
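vec_all_nan and vec_all_numeric both rely on the IEEE 754 rule that a NaN never compares equal to itself, which is why each feeds (a1) to vcmpeqfp twice, with opposite CR6 senses. A minimal sketch, assuming -maltivec:

    #include <altivec.h>

    int has_nan (__vector float v)
    {
      return !vec_all_numeric (v);  /* true iff some element is a NaN */
    }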
-#define vec_any_eq(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_eq"))))))))))))))))))))))))
-
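The __CR6_*_REV codes invert the sense of the corresponding CR6 test, so each vec_any_* predicate is the negation of its "none" counterpart. A minimal sketch, assuming -maltivec:

    #include <altivec.h>

    int any_equal (__vector signed int a, __vector signed int b)
    {
      return vec_any_eq (a, b);  /* 1 iff at least one element pair matches */
    }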
-#define vec_any_ge(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_ge"))))))))))))))))))))
-
-#define vec_any_gt(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_gt"))))))))))))))))))))
-
-#define vec_any_le(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (__vector float) (a2), (__vector float) (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_le"))))))))))))))))))))
-
-#define vec_any_lt(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (__vector float) (a2), (__vector float) (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_lt"))))))))))))))))))))
-
-#define vec_any_nan(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_nan"))
-
-#define vec_any_ne(a1, a2) \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (__vector float) (a1), (__vector float) (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_ne"))))))))))))))))))))))))
-
-#define vec_any_nge(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_nge"))
-
-#define vec_any_ngt(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_ngt"))
-
-#define vec_any_nle(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_nle"))
-
-#define vec_any_nlt(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_nlt"))
-
-#define vec_any_numeric(a1) \
-__ch (__un_args_eq (__vector float, (a1)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1)), \
- __builtin_altivec_compiletime_error ("vec_any_numeric"))
-
-#define vec_any_out(a1, a2) \
-__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
- __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2)), \
- __builtin_altivec_compiletime_error ("vec_any_out"))
-
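Per the AltiVec PIM, vec_all_in (x, bound) tests -bound[i] <= x[i] <= bound[i] for every element (bound is assumed non-negative), and vec_any_out is its complement. A minimal bounds-check sketch, assuming -maltivec:

    #include <altivec.h>

    int within_unit_range (__vector float x)
    {
      __vector float bound = {1.0f, 1.0f, 1.0f, 1.0f};
      return !vec_any_out (x, bound);  /* same test as vec_all_in (x, bound) */
    }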
-
-#endif /* __cplusplus */
+#define vec_mfvscr() ((__vector unsigned short) __builtin_altivec_mfvscr ())
+#define vec_splat_s8(x) __builtin_altivec_vspltisb ((x))
+#define vec_splat_s16(x) __builtin_altivec_vspltish ((x))
+#define vec_splat_s32(x) __builtin_altivec_vspltisw ((x))
+#define vec_splat_u8(x) ((__vector unsigned char) vec_splat_s8 ((x)))
+#define vec_splat_u16(x) ((__vector unsigned short) vec_splat_s16 ((x)))
+#define vec_splat_u32(x) ((__vector unsigned int) vec_splat_s32 ((x)))
+
+/* vec_step also accepts a type name as its argument, so it is not
+   enough simply to #define vec_step to __builtin_vec_step.  */
+#define vec_step(x) __builtin_vec_step (* (__typeof__ (x) *) 0)
#endif /* _ALTIVEC_H */
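A minimal sketch of the new defines above, assuming -maltivec; the vspltis* immediates are 5-bit signed literals, so the splat argument must be a constant in -16..15:

    #include <altivec.h>

    void splat_demo (void)
    {
      __vector signed short fives = vec_splat_s16 (5);
      __vector unsigned char ones  = vec_splat_u8 (1);
      /* Thanks to __typeof__, vec_step takes an expression or a type name.  */
      int n = vec_step (fives);                  /* 8 halfword elements */
      int m = vec_step (__vector unsigned int);  /* 4 word elements     */
      (void) ones; (void) n; (void) m;
    }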
diff --git a/contrib/gcc/config/rs6000/altivec.md b/contrib/gcc/config/rs6000/altivec.md
index 505a573..e032685 100644
--- a/contrib/gcc/config/rs6000/altivec.md
+++ b/contrib/gcc/config/rs6000/altivec.md
@@ -1,5 +1,5 @@
;; AltiVec patterns.
-;; Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
;; This file is part of GCC.
@@ -16,100 +16,178 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_constants
- [(UNSPEC_VSPLTISW 141)
- (UNSPEC_VSPLTISH 140)
- (UNSPEC_VSPLTISB 139)
+ [(UNSPEC_VCMPBFP 50)
+ (UNSPEC_VCMPEQUB 51)
+ (UNSPEC_VCMPEQUH 52)
+ (UNSPEC_VCMPEQUW 53)
+ (UNSPEC_VCMPEQFP 54)
+ (UNSPEC_VCMPGEFP 55)
+ (UNSPEC_VCMPGTUB 56)
+ (UNSPEC_VCMPGTSB 57)
+ (UNSPEC_VCMPGTUH 58)
+ (UNSPEC_VCMPGTSH 59)
+ (UNSPEC_VCMPGTUW 60)
+ (UNSPEC_VCMPGTSW 61)
+ (UNSPEC_VCMPGTFP 62)
+ (UNSPEC_VMSUMU 65)
+ (UNSPEC_VMSUMM 66)
+ (UNSPEC_VMSUMSHM 68)
+ (UNSPEC_VMSUMUHS 69)
+ (UNSPEC_VMSUMSHS 70)
+ (UNSPEC_VMHADDSHS 71)
+ (UNSPEC_VMHRADDSHS 72)
+ (UNSPEC_VMLADDUHM 73)
+ (UNSPEC_VADDCUW 75)
+ (UNSPEC_VADDU 76)
+ (UNSPEC_VADDS 77)
+ (UNSPEC_VAVGU 80)
+ (UNSPEC_VAVGS 81)
+ (UNSPEC_VMULEUB 83)
+ (UNSPEC_VMULESB 84)
+ (UNSPEC_VMULEUH 85)
+ (UNSPEC_VMULESH 86)
+ (UNSPEC_VMULOUB 87)
+ (UNSPEC_VMULOSB 88)
+ (UNSPEC_VMULOUH 89)
+ (UNSPEC_VMULOSH 90)
+ (UNSPEC_VPKUHUM 93)
+ (UNSPEC_VPKUWUM 94)
+ (UNSPEC_VPKPX 95)
+ (UNSPEC_VPKSHSS 97)
+ (UNSPEC_VPKSWSS 99)
+ (UNSPEC_VPKUHUS 100)
+ (UNSPEC_VPKSHUS 101)
+ (UNSPEC_VPKUWUS 102)
+ (UNSPEC_VPKSWUS 103)
+ (UNSPEC_VRL 104)
+ (UNSPEC_VSL 107)
+ (UNSPEC_VSLV4SI 110)
+ (UNSPEC_VSLO 111)
+ (UNSPEC_VSR 118)
+ (UNSPEC_VSRO 119)
+ (UNSPEC_VSUBCUW 124)
+ (UNSPEC_VSUBU 125)
+ (UNSPEC_VSUBS 126)
+ (UNSPEC_VSUM4UBS 131)
+ (UNSPEC_VSUM4S 132)
+ (UNSPEC_VSUM2SWS 134)
+ (UNSPEC_VSUMSWS 135)
+ (UNSPEC_VPERM 144)
+ (UNSPEC_VRFIP 148)
+ (UNSPEC_VRFIN 149)
+ (UNSPEC_VRFIM 150)
+ (UNSPEC_VCFUX 151)
+ (UNSPEC_VCFSX 152)
+ (UNSPEC_VCTUXS 153)
+ (UNSPEC_VCTSXS 154)
+ (UNSPEC_VLOGEFP 155)
+ (UNSPEC_VEXPTEFP 156)
+ (UNSPEC_VRSQRTEFP 157)
+ (UNSPEC_VREFP 158)
+ (UNSPEC_VSEL4SI 159)
+ (UNSPEC_VSEL4SF 160)
+ (UNSPEC_VSEL8HI 161)
+ (UNSPEC_VSEL16QI 162)
+ (UNSPEC_VLSDOI 163)
+ (UNSPEC_VUPKHSB 167)
+ (UNSPEC_VUPKHPX 168)
+ (UNSPEC_VUPKHSH 169)
+ (UNSPEC_VUPKLSB 170)
+ (UNSPEC_VUPKLPX 171)
+ (UNSPEC_VUPKLSH 172)
+ (UNSPEC_PREDICATE 173)
+ (UNSPEC_DST 190)
+ (UNSPEC_DSTT 191)
+ (UNSPEC_DSTST 192)
+ (UNSPEC_DSTSTT 193)
+ (UNSPEC_LVSL 194)
+ (UNSPEC_LVSR 195)
+ (UNSPEC_LVE 196)
+ (UNSPEC_STVX 201)
+ (UNSPEC_STVXL 202)
+ (UNSPEC_STVE 203)
+ (UNSPEC_SET_VSCR 213)
+ (UNSPEC_GET_VRSAVE 214)
+ (UNSPEC_REALIGN_LOAD 215)
+ (UNSPEC_REDUC_PLUS 217)
+ (UNSPEC_VECSH 219)
+ (UNSPEC_VCOND_V4SI 301)
+ (UNSPEC_VCOND_V4SF 302)
+ (UNSPEC_VCOND_V8HI 303)
+ (UNSPEC_VCOND_V16QI 304)
+ (UNSPEC_VCONDU_V4SI 305)
+ (UNSPEC_VCONDU_V8HI 306)
+ (UNSPEC_VCONDU_V16QI 307)
])
-;; Generic LVX load instruction.
-(define_insn "altivec_lvx_4si"
- [(set (match_operand:V4SI 0 "altivec_register_operand" "=v")
- (match_operand:V4SI 1 "memory_operand" "m"))]
- "TARGET_ALTIVEC"
- "lvx %0,%y1"
- [(set_attr "type" "vecload")])
-
-(define_insn "altivec_lvx_8hi"
- [(set (match_operand:V8HI 0 "altivec_register_operand" "=v")
- (match_operand:V8HI 1 "memory_operand" "m"))]
- "TARGET_ALTIVEC"
- "lvx %0,%y1"
- [(set_attr "type" "vecload")])
-
-(define_insn "altivec_lvx_16qi"
- [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
- (match_operand:V16QI 1 "memory_operand" "m"))]
- "TARGET_ALTIVEC"
- "lvx %0,%y1"
- [(set_attr "type" "vecload")])
+(define_constants
+ [(UNSPECV_SET_VRSAVE 30)
+ (UNSPECV_MTVSCR 186)
+ (UNSPECV_MFVSCR 187)
+ (UNSPECV_DSSALL 188)
+ (UNSPECV_DSS 189)
+ ])
+
+;; Vec int modes
+(define_mode_macro VI [V4SI V8HI V16QI])
+;; Short vec int modes
+(define_mode_macro VIshort [V8HI V16QI])
+;; Vec float modes
+(define_mode_macro VF [V4SF])
+;; Vec modes, pity mode macros are not composable
+(define_mode_macro V [V4SI V8HI V16QI V4SF])
+
+(define_mode_attr VI_char [(V4SI "w") (V8HI "h") (V16QI "b")])
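
These mode macros are what let the rest of the patch fold the per-mode patterns that follow into single templates: a pattern written over VI is instantiated once per mode, with <VI_char> supplying the b/h/w instruction suffix. A rough C-preprocessor analogy, purely illustrative and not the actual gen* machinery:

/* One template, three instantiations -- the way add<mode>3 over VI
   yields addv16qi3, addv8hi3 and addv4si3, emitting vaddubm,
   vadduhm and vadduwm respectively.  */
#define DEF_VI_ADD(MODE, CHAR)  /* pattern add<MODE>3 -> vaddu<CHAR>m */
DEF_VI_ADD (v16qi, b)
DEF_VI_ADD (v8hi,  h)
DEF_VI_ADD (v4si,  w)
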
-(define_insn "altivec_lvx_4sf"
- [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
- (match_operand:V4SF 1 "memory_operand" "m"))]
+;; Generic LVX load instruction.
+(define_insn "altivec_lvx_<mode>"
+ [(set (match_operand:V 0 "altivec_register_operand" "=v")
+ (match_operand:V 1 "memory_operand" "Z"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
;; Generic STVX store instruction.
-(define_insn "altivec_stvx_4si"
- [(set (match_operand:V4SI 0 "memory_operand" "=m")
- (match_operand:V4SI 1 "altivec_register_operand" "v"))]
- "TARGET_ALTIVEC"
- "stvx %1,%y0"
- [(set_attr "type" "vecstore")])
-
-(define_insn "altivec_stvx_8hi"
- [(set (match_operand:V8HI 0 "memory_operand" "=m")
- (match_operand:V8HI 1 "altivec_register_operand" "v"))]
- "TARGET_ALTIVEC"
- "stvx %1,%y0"
- [(set_attr "type" "vecstore")])
-
-(define_insn "altivec_stvx_16qi"
- [(set (match_operand:V16QI 0 "memory_operand" "=m")
- (match_operand:V16QI 1 "altivec_register_operand" "v"))]
- "TARGET_ALTIVEC"
- "stvx %1,%y0"
- [(set_attr "type" "vecstore")])
-
-(define_insn "altivec_stvx_4sf"
- [(set (match_operand:V4SF 0 "memory_operand" "=m")
- (match_operand:V4SF 1 "altivec_register_operand" "v"))]
+(define_insn "altivec_stvx_<mode>"
+ [(set (match_operand:V 0 "memory_operand" "=Z")
+ (match_operand:V 1 "altivec_register_operand" "v"))]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
;; Vector move instructions.
-(define_expand "movv4si"
- [(set (match_operand:V4SI 0 "nonimmediate_operand" "")
- (match_operand:V4SI 1 "any_operand" ""))]
+(define_expand "mov<mode>"
+ [(set (match_operand:V 0 "nonimmediate_operand" "")
+ (match_operand:V 1 "any_operand" ""))]
"TARGET_ALTIVEC"
- "{ rs6000_emit_move (operands[0], operands[1], V4SImode); DONE; }")
+{
+ rs6000_emit_move (operands[0], operands[1], <MODE>mode);
+ DONE;
+})
-(define_insn "*movv4si_internal"
- [(set (match_operand:V4SI 0 "nonimmediate_operand" "=m,v,v,o,r,r,v")
- (match_operand:V4SI 1 "input_operand" "v,m,v,r,o,r,W"))]
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:V 0 "nonimmediate_operand" "=Z,v,v,o,r,r,v")
+ (match_operand:V 1 "input_operand" "v,Z,v,r,o,r,W"))]
"TARGET_ALTIVEC
- && (register_operand (operands[0], V4SImode)
- || register_operand (operands[1], V4SImode))"
- "*
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
{
switch (which_alternative)
{
- case 0: return \"stvx %1,%y0\";
- case 1: return \"lvx %0,%y1\";
- case 2: return \"vor %0,%1,%1\";
- case 3: return \"#\";
- case 4: return \"#\";
- case 5: return \"#\";
+ case 0: return "stvx %1,%y0";
+ case 1: return "lvx %0,%y1";
+ case 2: return "vor %0,%1,%1";
+ case 3: return "#";
+ case 4: return "#";
+ case 5: return "#";
case 6: return output_vec_const_move (operands);
- default: abort();
+ default: gcc_unreachable ();
}
-}"
+}
[(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
(define_split
@@ -118,48 +196,9 @@
"TARGET_ALTIVEC && reload_completed
&& gpr_or_gpr_p (operands[0], operands[1])"
[(pc)]
-{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
-
-(define_split
- [(set (match_operand:V4SI 0 "altivec_register_operand" "")
- (match_operand:V4SI 1 "easy_vector_constant_add_self" ""))]
- "TARGET_ALTIVEC && reload_completed"
- [(set (match_dup 0) (match_dup 3))
- (set (match_dup 0)
- (plus:V4SI (match_dup 0)
- (match_dup 0)))]
- "
-{
- operands[3] = gen_easy_vector_constant_add_self (operands[1]);
-}")
-
-(define_expand "movv8hi"
- [(set (match_operand:V8HI 0 "nonimmediate_operand" "")
- (match_operand:V8HI 1 "any_operand" ""))]
- "TARGET_ALTIVEC"
- "{ rs6000_emit_move (operands[0], operands[1], V8HImode); DONE; }")
-
-(define_insn "*movv8hi_internal1"
- [(set (match_operand:V8HI 0 "nonimmediate_operand" "=m,v,v,o,r,r,v")
- (match_operand:V8HI 1 "input_operand" "v,m,v,r,o,r,W"))]
- "TARGET_ALTIVEC
- && (register_operand (operands[0], V8HImode)
- || register_operand (operands[1], V8HImode))"
- "*
{
- switch (which_alternative)
- {
- case 0: return \"stvx %1,%y0\";
- case 1: return \"lvx %0,%y1\";
- case 2: return \"vor %0,%1,%1\";
- case 3: return \"#\";
- case 4: return \"#\";
- case 5: return \"#\";
- case 6: return output_vec_const_move (operands);
- default: abort ();
- }
-}"
- [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
(define_split
[(set (match_operand:V8HI 0 "nonimmediate_operand" "")
@@ -170,47 +209,6 @@
{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
(define_split
- [(set (match_operand:V8HI 0 "altivec_register_operand" "")
- (match_operand:V8HI 1 "easy_vector_constant_add_self" ""))]
- "TARGET_ALTIVEC && reload_completed"
- [(set (match_dup 0) (match_dup 3))
- (set (match_dup 0)
- (plus:V8HI (match_dup 0)
- (match_dup 0)))]
- "
-{
- operands[3] = gen_easy_vector_constant_add_self (operands[1]);
-}")
-
-(define_expand "movv16qi"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
- (match_operand:V16QI 1 "any_operand" ""))]
- "TARGET_ALTIVEC"
- "{ rs6000_emit_move (operands[0], operands[1], V16QImode); DONE; }")
-
-(define_insn "*movv16qi_internal1"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "=m,v,v,o,r,r,v")
- (match_operand:V16QI 1 "input_operand" "v,m,v,r,o,r,W"))]
- "TARGET_ALTIVEC
- && (register_operand (operands[0], V16QImode)
- || register_operand (operands[1], V16QImode))"
- "*
-{
- switch (which_alternative)
- {
- case 0: return \"stvx %1,%y0\";
- case 1: return \"lvx %0,%y1\";
- case 2: return \"vor %0,%1,%1\";
- case 3: return \"#\";
- case 4: return \"#\";
- case 5: return \"#\";
- case 6: return output_vec_const_move (operands);
- default: abort ();
- }
-}"
- [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
-
-(define_split
[(set (match_operand:V16QI 0 "nonimmediate_operand" "")
(match_operand:V16QI 1 "input_operand" ""))]
"TARGET_ALTIVEC && reload_completed
@@ -219,106 +217,91 @@
{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
(define_split
- [(set (match_operand:V16QI 0 "altivec_register_operand" "")
- (match_operand:V16QI 1 "easy_vector_constant_add_self" ""))]
- "TARGET_ALTIVEC && reload_completed"
- [(set (match_dup 0) (match_dup 3))
- (set (match_dup 0)
- (plus:V16QI (match_dup 0)
- (match_dup 0)))]
- "
-{
- operands[3] = gen_easy_vector_constant_add_self (operands[1]);
-}")
-
-(define_expand "movv4sf"
- [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
- (match_operand:V4SF 1 "any_operand" ""))]
- "TARGET_ALTIVEC"
- "{ rs6000_emit_move (operands[0], operands[1], V4SFmode); DONE; }")
-
-(define_insn "*movv4sf_internal1"
- [(set (match_operand:V4SF 0 "nonimmediate_operand" "=m,v,v,o,r,r,v")
- (match_operand:V4SF 1 "input_operand" "v,m,v,r,o,r,W"))]
- "TARGET_ALTIVEC
- && (register_operand (operands[0], V4SFmode)
- || register_operand (operands[1], V4SFmode))"
- "*
-{
- switch (which_alternative)
- {
- case 0: return \"stvx %1,%y0\";
- case 1: return \"lvx %0,%y1\";
- case 2: return \"vor %0,%1,%1\";
- case 3: return \"#\";
- case 4: return \"#\";
- case 5: return \"#\";
- case 6: return output_vec_const_move (operands);
- default: abort ();
- }
-}"
- [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
-
-(define_split
[(set (match_operand:V4SF 0 "nonimmediate_operand" "")
(match_operand:V4SF 1 "input_operand" ""))]
"TARGET_ALTIVEC && reload_completed
&& gpr_or_gpr_p (operands[0], operands[1])"
[(pc)]
-{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+{
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
+
+(define_split
+ [(set (match_operand:VI 0 "altivec_register_operand" "")
+ (match_operand:VI 1 "easy_vector_constant_add_self" ""))]
+ "TARGET_ALTIVEC && reload_completed"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (plus:VI (match_dup 0)
+ (match_dup 0)))]
+{
+ rtx dup = gen_easy_altivec_constant (operands[1]);
+ rtx const_vec;
+
+ /* Divide the operand of the resulting VEC_DUPLICATE, and use
+ simplify_rtx to make a CONST_VECTOR. */
+ XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
+ XEXP (dup, 0), const1_rtx);
+ const_vec = simplify_rtx (dup);
+
+ if (GET_MODE (const_vec) == <MODE>mode)
+ operands[3] = const_vec;
+ else
+ operands[3] = gen_lowpart (<MODE>mode, const_vec);
+})
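
The split above covers splat constants just outside the vspltis* immediate range of -16..15: the constant's elements are even, so the split materializes half the value (the ASHIFTRT by 1 on the VEC_DUPLICATE operand) and the self-add restores it. A scalar check of the invariant, a sketch only:

#include <assert.h>

int main (void)
{
  signed char c = 24;                  /* desired splat, outside -16..15 */
  signed char half = c >> 1;           /* what the split materializes    */
  assert (half >= -16 && half <= 15);  /* now fits vspltis* immediate    */
  assert ((signed char) (half + half) == c);
  return 0;
}
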
(define_insn "get_vrsave_internal"
[(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(reg:SI 109)] 214))]
+ (unspec:SI [(reg:SI 109)] UNSPEC_GET_VRSAVE))]
"TARGET_ALTIVEC"
- "*
{
if (TARGET_MACHO)
- return \"mfspr %0,256\";
+ return "mfspr %0,256";
else
- return \"mfvrsave %0\";
-}"
+ return "mfvrsave %0";
+}
[(set_attr "type" "*")])
(define_insn "*set_vrsave_internal"
[(match_parallel 0 "vrsave_operation"
[(set (reg:SI 109)
(unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
- (reg:SI 109)] 30))])]
+ (reg:SI 109)] UNSPECV_SET_VRSAVE))])]
"TARGET_ALTIVEC"
- "*
{
if (TARGET_MACHO)
- return \"mtspr 256,%1\";
+ return "mtspr 256,%1";
else
- return \"mtvrsave %1\";
-}"
+ return "mtvrsave %1";
+}
[(set_attr "type" "*")])
-;; Simple binary operations.
+(define_insn "*save_world"
+ [(match_parallel 0 "save_world_operation"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_world"
+ [(match_parallel 0 "restore_world_operation"
+ [(return)
+ (use (match_operand:SI 1 "register_operand" "l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" "=r"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "b %z2")
-(define_insn "addv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (plus:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vaddubm %0,%1,%2"
- [(set_attr "type" "vecsimple")])
+;; Simple binary operations.
-(define_insn "addv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (plus:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
+;; add
+(define_insn "add<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (plus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vadduhm %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "addv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (plus:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vadduwm %0,%1,%2"
+ "vaddu<VI_char>m %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "addv4sf3"
@@ -332,133 +315,102 @@
(define_insn "altivec_vaddcuw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 35))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VADDCUW))]
"TARGET_ALTIVEC"
"vaddcuw %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vaddubs"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 36))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vaddubs %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vaddsbs"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 37))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+(define_insn "altivec_vaddu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
- "vaddsbs %0,%1,%2"
+ "vaddu<VI_char>s %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vadduhs"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 38))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+(define_insn "altivec_vadds<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
- "vadduhs %0,%1,%2"
+ "vadds<VI_char>s %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vaddshs"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 39))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+;; sub
+(define_insn "sub<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (minus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vaddshs %0,%1,%2"
+ "vsubu<VI_char>m %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vadduws"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 40))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+(define_insn "subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vadduws %0,%1,%2"
- [(set_attr "type" "vecsimple")])
+ "vsubfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
-(define_insn "altivec_vaddsws"
+(define_insn "altivec_vsubcuw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 41))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vaddsws %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "andv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (and:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vand %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vandc"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (and:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (not:V4SI (match_operand:V4SI 2 "register_operand" "v"))))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUBCUW))]
"TARGET_ALTIVEC"
- "vandc %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vavgub"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 44))]
- "TARGET_ALTIVEC"
- "vavgub %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vavgsb"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 45))]
- "TARGET_ALTIVEC"
- "vavgsb %0,%1,%2"
+ "vsubcuw %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vavguh"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 46))]
+(define_insn "altivec_vsubu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
- "vavguh %0,%1,%2"
+ "vsubu<VI_char>s %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vavgsh"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 47))]
+(define_insn "altivec_vsubs<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
- "vavgsh %0,%1,%2"
+ "vsubs<VI_char>s %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vavguw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 48))]
+;; average
+(define_insn "altivec_vavgu<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGU))]
"TARGET_ALTIVEC"
- "vavguw %0,%1,%2"
+ "vavgu<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vavgsw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 49))]
+(define_insn "altivec_vavgs<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGS))]
"TARGET_ALTIVEC"
- "vavgsw %0,%1,%2"
+ "vavgs<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_vcmpbfp"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 50))]
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPBFP))]
"TARGET_ALTIVEC"
"vcmpbfp %0,%1,%2"
[(set_attr "type" "veccmp")])
@@ -466,7 +418,8 @@
(define_insn "altivec_vcmpequb"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 51))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUB))]
"TARGET_ALTIVEC"
"vcmpequb %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -474,7 +427,8 @@
(define_insn "altivec_vcmpequh"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 52))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUH))]
"TARGET_ALTIVEC"
"vcmpequh %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -482,7 +436,8 @@
(define_insn "altivec_vcmpequw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 53))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUW))]
"TARGET_ALTIVEC"
"vcmpequw %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -490,7 +445,8 @@
(define_insn "altivec_vcmpeqfp"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 54))]
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPEQFP))]
"TARGET_ALTIVEC"
"vcmpeqfp %0,%1,%2"
[(set_attr "type" "veccmp")])
@@ -498,7 +454,8 @@
(define_insn "altivec_vcmpgefp"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 55))]
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGEFP))]
"TARGET_ALTIVEC"
"vcmpgefp %0,%1,%2"
[(set_attr "type" "veccmp")])
@@ -506,7 +463,8 @@
(define_insn "altivec_vcmpgtub"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 56))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUB))]
"TARGET_ALTIVEC"
"vcmpgtub %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -514,7 +472,8 @@
(define_insn "altivec_vcmpgtsb"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 57))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSB))]
"TARGET_ALTIVEC"
"vcmpgtsb %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -522,7 +481,8 @@
(define_insn "altivec_vcmpgtuh"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 58))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUH))]
"TARGET_ALTIVEC"
"vcmpgtuh %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -530,7 +490,8 @@
(define_insn "altivec_vcmpgtsh"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 59))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSH))]
"TARGET_ALTIVEC"
"vcmpgtsh %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -538,7 +499,8 @@
(define_insn "altivec_vcmpgtuw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 60))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUW))]
"TARGET_ALTIVEC"
"vcmpgtuw %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -546,7 +508,8 @@
(define_insn "altivec_vcmpgtsw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 61))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSW))]
"TARGET_ALTIVEC"
"vcmpgtsw %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -554,7 +517,8 @@
(define_insn "altivec_vcmpgtfp"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 62))]
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGTFP))]
"TARGET_ALTIVEC"
"vcmpgtfp %0,%1,%2"
[(set_attr "type" "veccmp")])
@@ -581,16 +545,75 @@
rtx neg0;
/* Generate [-0.0, -0.0, -0.0, -0.0]. */
- neg0 = gen_reg_rtx (V4SFmode);
- emit_insn (gen_altivec_vspltisw_v4sf (neg0, GEN_INT (-1)));
- emit_insn (gen_altivec_vslw_v4sf (neg0, neg0, neg0));
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_altivec_vslw (neg0, neg0, neg0));
/* Use the multiply-add. */
emit_insn (gen_altivec_vmaddfp (operands[0], operands[1], operands[2],
- neg0));
+ gen_lowpart (V4SFmode, neg0)));
DONE;
}")
+;; 32-bit integer multiplication
+;; A_high = (Operand_1 & 0xFFFF0000) >> 16
+;; A_low = Operand_1 & 0xFFFF
+;; B_high = (Operand_2 & 0xFFFF0000) >> 16
+;; B_low = Operand_2 & 0xFFFF
+;; result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
+
+;; (define_insn "mulv4si3"
+;; [(set (match_operand:V4SI 0 "register_operand" "=v")
+;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
+;; (match_operand:V4SI 2 "register_operand" "v")))]
+(define_expand "mulv4si3"
+ [(use (match_operand:V4SI 0 "register_operand" ""))
+ (use (match_operand:V4SI 1 "register_operand" ""))
+ (use (match_operand:V4SI 2 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+ {
+ rtx zero;
+ rtx swap;
+ rtx small_swap;
+ rtx sixteen;
+ rtx one;
+ rtx two;
+ rtx low_product;
+ rtx high_product;
+
+ zero = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+
+ sixteen = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
+
+ swap = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vrlw (swap, operands[2], sixteen));
+
+ one = gen_reg_rtx (V8HImode);
+ convert_move (one, operands[1], 0);
+
+ two = gen_reg_rtx (V8HImode);
+ convert_move (two, operands[2], 0);
+
+ small_swap = gen_reg_rtx (V8HImode);
+ convert_move (small_swap, swap, 0);
+
+ low_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmulouh (low_product, one, two));
+
+ high_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
+
+ emit_insn (gen_altivec_vslw (high_product, high_product, sixteen));
+
+ emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
+
+ DONE;
+ }")
+
+
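
The expander above stitches the 32-bit product out of 16x16 primitives: vmulouh forms A_low * B_low, vmsumuhm on the halfword-rotated operand forms A_high * B_low + A_low * B_high, and vslw/vadduwm reassemble the result; the A_high * B_high term falls outside the low 32 bits and is dropped. A scalar reference of the same decomposition, not part of the patch:

#include <assert.h>
#include <stdint.h>

static uint32_t mul32_via_16 (uint32_t a, uint32_t b)
{
  uint32_t a_lo = a & 0xFFFF, a_hi = a >> 16;
  uint32_t b_lo = b & 0xFFFF, b_hi = b >> 16;
  uint32_t low  = a_lo * b_lo;                        /* vmulouh        */
  uint32_t high = (a_hi * b_lo + a_lo * b_hi) << 16;  /* vmsumuhm, vslw */
  return low + high;                                  /* vadduwm        */
}

int main (void)
{
  /* Unsigned arithmetic wraps mod 2^32, so both sides agree.  */
  assert (mul32_via_16 (123456789u, 987654321u)
          == 123456789u * 987654321u);
  return 0;
}
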
;; Fused multiply subtract
(define_insn "altivec_vnmsubfp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
@@ -601,39 +624,32 @@
"vnmsubfp %0,%1,%2,%3"
[(set_attr "type" "vecfloat")])
-
-(define_insn "altivec_vmsumubm"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 65))]
- "TARGET_ALTIVEC"
- "vmsumubm %0,%1,%2,%3"
- [(set_attr "type" "veccomplex")])
-
-(define_insn "altivec_vmsummbm"
+(define_insn "altivec_vmsumu<VI_char>m"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 66))]
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMU))]
"TARGET_ALTIVEC"
- "vmsummbm %0,%1,%2,%3"
+ "vmsumu<VI_char>m %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
-(define_insn "altivec_vmsumuhm"
+(define_insn "altivec_vmsumm<VI_char>m"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 67))]
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMM))]
"TARGET_ALTIVEC"
- "vmsumuhm %0,%1,%2,%3"
+ "vmsumm<VI_char>m %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
(define_insn "altivec_vmsumshm"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 68))]
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHM))]
"TARGET_ALTIVEC"
"vmsumshm %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
@@ -642,8 +658,9 @@
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 69))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMUHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vmsumuhs %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
@@ -652,91 +669,91 @@
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 70))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vmsumshs %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
-(define_insn "umaxv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (umax:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vmaxub %0,%1,%2"
- [(set_attr "type" "vecsimple")])
+;; max
-(define_insn "smaxv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (smax:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
+(define_insn "umax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxsb %0,%1,%2"
+ "vmaxu<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "umaxv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (umax:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
+(define_insn "smax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxuh %0,%1,%2"
+ "vmaxs<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "smaxv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (smax:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
+(define_insn "smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxsh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
+ "vmaxfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
-(define_insn "umaxv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (umax:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
+(define_insn "umin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxuw %0,%1,%2"
+ "vminu<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "smaxv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (smax:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
+(define_insn "smin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxsw %0,%1,%2"
+ "vmins<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "smaxv4sf3"
+(define_insn "sminv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
(match_operand:V4SF 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vmaxfp %0,%1,%2"
+ "vminfp %0,%1,%2"
[(set_attr "type" "veccmp")])
(define_insn "altivec_vmhaddshs"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V8HI 3 "register_operand" "v")] 71))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vmhaddshs %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
+
(define_insn "altivec_vmhraddshs"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V8HI 3 "register_operand" "v")] 72))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHRADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vmhraddshs %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
+
(define_insn "altivec_vmladduhm"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V8HI 3 "register_operand" "v")] 73))]
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMLADDUHM))]
"TARGET_ALTIVEC"
"vmladduhm %0,%1,%2,%3"
[(set_attr "type" "veccomplex")])
@@ -744,24 +761,40 @@
(define_insn "altivec_vmrghb"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (parallel [(const_int 8)
+ (parallel [(const_int 0)
+ (const_int 8)
+ (const_int 1)
(const_int 9)
+ (const_int 2)
(const_int 10)
- (const_int 11)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
(const_int 12)
+ (const_int 5)
(const_int 13)
- (const_int 14)
- (const_int 15)
+ (const_int 6)
+ (const_int 14)
+ (const_int 7)
+ (const_int 15)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 8)
(const_int 0)
+ (const_int 9)
(const_int 1)
+ (const_int 10)
(const_int 2)
- (const_int 3)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
(const_int 4)
+ (const_int 13)
(const_int 5)
+ (const_int 14)
(const_int 6)
+ (const_int 15)
(const_int 7)]))
- (match_operand:V16QI 2 "register_operand" "v")
- (const_int 255)))]
+ (const_int 21845)))]
"TARGET_ALTIVEC"
"vmrghb %0,%1,%2"
[(set_attr "type" "vecperm")])
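
The rewritten pattern makes the interleave explicit: each input is pre-shuffled by a vec_select, and the vec_merge mask 21845 (0x5555) takes even result lanes from the first select and odd lanes from the second, i.e. A0,B0,A1,B1,...,A7,B7 from the two high halves. A reference implementation of what vmrghb computes, as I read the pattern:

#include <stdint.h>

void vmrghb_ref (uint8_t r[16], const uint8_t a[16], const uint8_t b[16])
{
  for (int i = 0; i < 8; i++)
    {
      r[2 * i]     = a[i];  /* even lanes: high (low-numbered) half of A */
      r[2 * i + 1] = b[i];  /* odd lanes:  high half of B               */
    }
}
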
@@ -769,16 +802,24 @@
(define_insn "altivec_vmrghh"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (parallel [(const_int 4)
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
(const_int 5)
+ (const_int 2)
(const_int 6)
- (const_int 7)
+ (const_int 3)
+ (const_int 7)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 4)
(const_int 0)
+ (const_int 5)
(const_int 1)
+ (const_int 6)
(const_int 2)
+ (const_int 7)
(const_int 3)]))
- (match_operand:V8HI 2 "register_operand" "v")
- (const_int 15)))]
+ (const_int 85)))]
"TARGET_ALTIVEC"
"vmrghh %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -786,131 +827,108 @@
(define_insn "altivec_vmrghw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
(parallel [(const_int 2)
- (const_int 3)
- (const_int 0)
+ (const_int 0)
+ (const_int 3)
(const_int 1)]))
- (match_operand:V4SI 2 "register_operand" "v")
- (const_int 12)))]
+ (const_int 5)))]
"TARGET_ALTIVEC"
"vmrghw %0,%1,%2"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vmrglb"
[(set (match_operand:V16QI 0 "register_operand" "=v")
- (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
- (parallel [(const_int 0)
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 0)
+ (const_int 9)
(const_int 1)
+ (const_int 10)
(const_int 2)
- (const_int 3)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
(const_int 4)
+ (const_int 13)
(const_int 5)
- (const_int 6)
- (const_int 7)
+ (const_int 14)
+ (const_int 6)
+ (const_int 15)
+ (const_int 7)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 0)
(const_int 8)
+ (const_int 1)
(const_int 9)
+ (const_int 2)
(const_int 10)
- (const_int 11)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
(const_int 12)
+ (const_int 5)
(const_int 13)
+ (const_int 6)
(const_int 14)
+ (const_int 7)
(const_int 15)]))
- (match_operand:V16QI 1 "register_operand" "v")
- (const_int 255)))]
+ (const_int 21845)))]
"TARGET_ALTIVEC"
"vmrglb %0,%1,%2"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vmrglh"
[(set (match_operand:V8HI 0 "register_operand" "=v")
- (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
- (parallel [(const_int 0)
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
(const_int 1)
+ (const_int 6)
(const_int 2)
- (const_int 3)
+ (const_int 7)
+ (const_int 3)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 0)
(const_int 4)
+ (const_int 1)
(const_int 5)
+ (const_int 2)
(const_int 6)
+ (const_int 3)
(const_int 7)]))
- (match_operand:V8HI 1 "register_operand" "v")
- (const_int 15)))]
+ (const_int 85)))]
"TARGET_ALTIVEC"
"vmrglh %0,%1,%2"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vmrglw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
(parallel [(const_int 0)
- (const_int 1)
- (const_int 2)
+ (const_int 2)
+ (const_int 1)
(const_int 3)]))
- (match_operand:V4SI 1 "register_operand" "v")
- (const_int 12)))]
+ (const_int 5)))]
"TARGET_ALTIVEC"
"vmrglw %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "uminv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (umin:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminub %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "sminv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (smin:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminsb %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "uminv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (umin:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminuh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "sminv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (smin:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminsh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "uminv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (umin:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminuw %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "sminv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (smin:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminsw %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "sminv4sf3"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vminfp %0,%1,%2"
- [(set_attr "type" "veccmp")])
-
(define_insn "altivec_vmuleub"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 83))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULEUB))]
"TARGET_ALTIVEC"
"vmuleub %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -918,7 +936,8 @@
(define_insn "altivec_vmulesb"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 84))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULESB))]
"TARGET_ALTIVEC"
"vmulesb %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -926,7 +945,8 @@
(define_insn "altivec_vmuleuh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 85))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULEUH))]
"TARGET_ALTIVEC"
"vmuleuh %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -934,7 +954,8 @@
(define_insn "altivec_vmulesh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 86))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULESH))]
"TARGET_ALTIVEC"
"vmulesh %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -942,7 +963,8 @@
(define_insn "altivec_vmuloub"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 87))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOUB))]
"TARGET_ALTIVEC"
"vmuloub %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -950,7 +972,8 @@
(define_insn "altivec_vmulosb"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 88))]
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOSB))]
"TARGET_ALTIVEC"
"vmulosb %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -958,7 +981,8 @@
(define_insn "altivec_vmulouh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 89))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOUH))]
"TARGET_ALTIVEC"
"vmulouh %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -966,31 +990,83 @@
(define_insn "altivec_vmulosh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 90))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOSH))]
"TARGET_ALTIVEC"
"vmulosh %0,%1,%2"
[(set_attr "type" "veccomplex")])
-(define_insn "altivec_vnor"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (not:V4SI (ior:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v"))))]
+
+;; logical ops
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vnor %0,%1,%2"
+ "vand %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "iorv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (ior:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
+(define_insn "ior<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
"vor %0,%1,%2"
[(set_attr "type" "vecsimple")])
+(define_insn "xor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (xor:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "xorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (xor:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%1"
+ [(set_attr "type" "vecsimple")])
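
one_cmpl<mode>2 needs no dedicated instruction because complement is NOR of a value with itself, ~a == ~(a | a), hence the vnor %0,%1,%1 form. A quick scalar check:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  uint32_t a = UINT32_C (0xDEADBEEF);
  assert (~(a | a) == ~a);  /* NOR with self is complement */
  return 0;
}
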
+
+(define_insn "altivec_nor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "andc<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (not:VI (match_operand:VI 2 "register_operand" "v"))
+ (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*andc3_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
(define_insn "altivec_vpkuhum"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 93))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUM))]
"TARGET_ALTIVEC"
"vpkuhum %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -998,7 +1074,8 @@
(define_insn "altivec_vpkuwum"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 94))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUM))]
"TARGET_ALTIVEC"
"vpkuwum %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1006,43 +1083,28 @@
(define_insn "altivec_vpkpx"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 95))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKPX))]
"TARGET_ALTIVEC"
"vpkpx %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vpkuhss"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 96))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vpkuhss %0,%1,%2"
- [(set_attr "type" "vecperm")])
-
(define_insn "altivec_vpkshss"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 97))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkshss %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vpkuwss"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 98))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vpkuwss %0,%1,%2"
- [(set_attr "type" "vecperm")])
-
(define_insn "altivec_vpkswss"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 99))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkswss %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1050,8 +1112,9 @@
(define_insn "altivec_vpkuhus"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 100))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkuhus %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1059,8 +1122,9 @@
(define_insn "altivec_vpkshus"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 101))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkshus %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1068,8 +1132,9 @@
(define_insn "altivec_vpkuwus"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 102))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkuwus %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1077,72 +1142,36 @@
(define_insn "altivec_vpkswus"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 103))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vpkswus %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vrlb"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 104))]
- "TARGET_ALTIVEC"
- "vrlb %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vrlh"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 105))]
- "TARGET_ALTIVEC"
- "vrlh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vrlw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 106))]
- "TARGET_ALTIVEC"
- "vrlw %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vslb"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 107))]
+(define_insn "altivec_vrl<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VRL))]
"TARGET_ALTIVEC"
- "vslb %0,%1,%2"
+ "vrl<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vslh"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 108))]
+(define_insn "altivec_vsl<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSL))]
"TARGET_ALTIVEC"
- "vslh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vslw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 109))]
- "TARGET_ALTIVEC"
- "vslw %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vslw_v4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 109))]
- "TARGET_ALTIVEC"
- "vslw %0,%1,%2"
+ "vsl<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_vsl"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 110))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLV4SI))]
"TARGET_ALTIVEC"
"vsl %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1150,63 +1179,33 @@
(define_insn "altivec_vslo"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 111))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLO))]
"TARGET_ALTIVEC"
"vslo %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsrb"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 112))]
- "TARGET_ALTIVEC"
- "vsrb %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsrh"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 113))]
- "TARGET_ALTIVEC"
- "vsrh %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsrw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 114))]
+(define_insn "lshr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (lshiftrt:VI (match_operand:VI 1 "register_operand" "v")
+                     (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vsrw %0,%1,%2"
+ "vsr<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vsrab"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 115))]
+(define_insn "ashr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ashiftrt:VI (match_operand:VI 1 "register_operand" "v")
+                     (match_operand:VI 2 "register_operand" "v")))]
"TARGET_ALTIVEC"
- "vsrab %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsrah"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 116))]
- "TARGET_ALTIVEC"
- "vsrah %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsraw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 117))]
- "TARGET_ALTIVEC"
- "vsraw %0,%1,%2"
+ "vsra<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
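
lshr<mode>3 and ashr<mode>3 map the generic shift names onto vsr<x> and vsra<x>; each element is shifted by the corresponding element of operand 2, taken modulo the element width. A scalar reference for the logical byte case, a sketch only:

#include <stdint.h>

void vsrb_ref (uint8_t r[16], const uint8_t a[16], const uint8_t sh[16])
{
  for (int i = 0; i < 16; i++)
    r[i] = a[i] >> (sh[i] & 7);  /* per-lane count mod 8 */
}
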
(define_insn "altivec_vsr"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 118))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSR))]
"TARGET_ALTIVEC"
"vsr %0,%1,%2"
[(set_attr "type" "vecperm")])
@@ -1214,137 +1213,38 @@
(define_insn "altivec_vsro"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 119))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSRO))]
"TARGET_ALTIVEC"
"vsro %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "subv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (minus:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vsububm %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "subv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (minus:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vsubuhm %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "subv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (minus:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vsubuwm %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "subv4sf3"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vsubfp %0,%1,%2"
- [(set_attr "type" "vecfloat")])
-
-(define_insn "altivec_vsubcuw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 124))]
- "TARGET_ALTIVEC"
- "vsubcuw %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsububs"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 125))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsububs %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsubsbs"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 126))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsubsbs %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsubuhs"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 127))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsubuhs %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsubshs"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 128))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsubshs %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsubuws"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 129))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsubuws %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "altivec_vsubsws"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 130))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsubsws %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
(define_insn "altivec_vsum4ubs"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 131))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4UBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vsum4ubs %0,%1,%2"
[(set_attr "type" "veccomplex")])
-(define_insn "altivec_vsum4sbs"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 132))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
- "TARGET_ALTIVEC"
- "vsum4sbs %0,%1,%2"
- [(set_attr "type" "veccomplex")])
-
-(define_insn "altivec_vsum4shs"
+(define_insn "altivec_vsum4s<VI_char>s"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 133))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4S))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
- "vsum4shs %0,%1,%2"
+ "vsum4s<VI_char>s %0,%1,%2"
[(set_attr "type" "veccomplex")])
(define_insn "altivec_vsum2sws"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 134))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM2SWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vsum2sws %0,%1,%2"
[(set_attr "type" "veccomplex")])
@@ -1352,91 +1252,59 @@
(define_insn "altivec_vsumsws"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 135))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vsumsws %0,%1,%2"
[(set_attr "type" "veccomplex")])
-;; Vector xor's
-(define_insn "xorv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (xor:V4SI (match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vxor %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "xorv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (xor:V8HI (match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vxor %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
-(define_insn "xorv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (xor:V16QI (match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")))]
- "TARGET_ALTIVEC"
- "vxor %0,%1,%2"
- [(set_attr "type" "vecsimple")])
-
(define_insn "altivec_vspltb"
[(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 136))]
+ (vec_duplicate:V16QI
+ (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
"TARGET_ALTIVEC"
"vspltb %0,%1,%2"
[(set_attr "type" "vecperm")])
-;; End of vector xor's
(define_insn "altivec_vsplth"
[(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 137))]
+ (vec_duplicate:V8HI
+ (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
"TARGET_ALTIVEC"
"vsplth %0,%1,%2"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vspltw"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 138))]
+ (vec_duplicate:V4SI
+ (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
"TARGET_ALTIVEC"
"vspltw %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vspltisb"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:QI 1 "immediate_operand" "i")]
- UNSPEC_VSPLTISB))]
- "TARGET_ALTIVEC"
- "vspltisb %0,%1"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vspltish"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:QI 1 "immediate_operand" "i")]
- UNSPEC_VSPLTISH))]
- "TARGET_ALTIVEC"
- "vspltish %0,%1"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vspltisw"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:QI 1 "immediate_operand" "i")]
- UNSPEC_VSPLTISW))]
+(define_insn "*altivec_vspltsf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (vec_duplicate:V4SF
+ (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
"TARGET_ALTIVEC"
- "vspltisw %0,%1"
+ "vspltw %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vspltisw_v4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:QI 1 "immediate_operand" "i")] 142))]
+(define_insn "altivec_vspltis<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (vec_duplicate:VI
+ (match_operand:QI 1 "s5bit_cint_operand" "i")))]
"TARGET_ALTIVEC"
- "vspltisw %0,%1"
+ "vspltis<VI_char> %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "ftruncv4sf2"
@@ -1446,59 +1314,36 @@
"vrfiz %0,%1"
[(set_attr "type" "vecfloat")])
-(define_insn "altivec_vperm_4si"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")
- (match_operand:V16QI 3 "register_operand" "v")] 144))]
- "TARGET_ALTIVEC"
- "vperm %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vperm_4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")
- (match_operand:V16QI 3 "register_operand" "v")] 145))]
- "TARGET_ALTIVEC"
- "vperm %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vperm_8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V16QI 3 "register_operand" "v")] 146))]
- "TARGET_ALTIVEC"
- "vperm %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vperm_16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")
- (match_operand:V16QI 3 "register_operand" "v")] 147))]
+(define_insn "altivec_vperm_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VPERM))]
"TARGET_ALTIVEC"
"vperm %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vrfip"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 148))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIP))]
"TARGET_ALTIVEC"
"vrfip %0,%1"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vrfin"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 149))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIN))]
"TARGET_ALTIVEC"
"vrfin %0,%1"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vrfim"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 150))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIM))]
"TARGET_ALTIVEC"
"vrfim %0,%1"
[(set_attr "type" "vecfloat")])
@@ -1506,7 +1351,8 @@
(define_insn "altivec_vcfux"
[(set (match_operand:V4SF 0 "register_operand" "=v")
(unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 151))]
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFUX))]
"TARGET_ALTIVEC"
"vcfux %0,%1,%2"
[(set_attr "type" "vecfloat")])
@@ -1514,7 +1360,8 @@
(define_insn "altivec_vcfsx"
[(set (match_operand:V4SF 0 "register_operand" "=v")
(unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 152))]
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFSX))]
"TARGET_ALTIVEC"
"vcfsx %0,%1,%2"
[(set_attr "type" "vecfloat")])
@@ -1522,8 +1369,9 @@
(define_insn "altivec_vctuxs"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 153))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTUXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vctuxs %0,%1,%2"
[(set_attr "type" "vecfloat")])
@@ -1531,150 +1379,273 @@
(define_insn "altivec_vctsxs"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:QI 2 "immediate_operand" "i")] 154))
- (set (reg:SI 110) (unspec:SI [(const_int 0)] 213))]
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTSXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
"TARGET_ALTIVEC"
"vctsxs %0,%1,%2"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vlogefp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 155))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VLOGEFP))]
"TARGET_ALTIVEC"
"vlogefp %0,%1"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vexptefp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 156))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VEXPTEFP))]
"TARGET_ALTIVEC"
"vexptefp %0,%1"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vrsqrtefp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 157))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRSQRTEFP))]
"TARGET_ALTIVEC"
"vrsqrtefp %0,%1"
[(set_attr "type" "vecfloat")])
(define_insn "altivec_vrefp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 158))]
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VREFP))]
"TARGET_ALTIVEC"
"vrefp %0,%1"
[(set_attr "type" "vecfloat")])
-(define_insn "altivec_vsel_4si"
+(define_expand "vcondv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "comparison_operator" "")
+ (match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "comparison_operator" "")
+ (match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V4SF 3 "comparison_operator" "")
+ (match_operand:V4SF 4 "register_operand" "v")
+ (match_operand:V4SF 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "comparison_operator" "")
+ (match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "comparison_operator" "")
+ (match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "comparison_operator" "")
+ (match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "comparison_operator" "")
+ (match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+
+(define_insn "altivec_vsel_v4si"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
(match_operand:V4SI 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 159))]
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SI))]
"TARGET_ALTIVEC"
"vsel %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsel_4sf"
+(define_insn "altivec_vsel_v4sf"
[(set (match_operand:V4SF 0 "register_operand" "=v")
(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
(match_operand:V4SF 2 "register_operand" "v")
- (match_operand:V4SI 3 "register_operand" "v")] 160))]
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SF))]
"TARGET_ALTIVEC"
"vsel %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsel_8hi"
+(define_insn "altivec_vsel_v8hi"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
(match_operand:V8HI 2 "register_operand" "v")
- (match_operand:V8HI 3 "register_operand" "v")] 161))]
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VSEL8HI))]
"TARGET_ALTIVEC"
"vsel %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsel_16qi"
+(define_insn "altivec_vsel_v16qi"
[(set (match_operand:V16QI 0 "register_operand" "=v")
(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
(match_operand:V16QI 2 "register_operand" "v")
- (match_operand:V16QI 3 "register_operand" "v")] 162))]
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VSEL16QI))]
"TARGET_ALTIVEC"
"vsel %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsldoi_4si"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")
- (match_operand:QI 3 "immediate_operand" "i")] 163))]
- "TARGET_ALTIVEC"
- "vsldoi %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vsldoi_4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")
- (match_operand:QI 3 "immediate_operand" "i")] 164))]
- "TARGET_ALTIVEC"
- "vsldoi %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vsldoi_8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")
- (match_operand:QI 3 "immediate_operand" "i")] 165))]
- "TARGET_ALTIVEC"
- "vsldoi %0,%1,%2,%3"
- [(set_attr "type" "vecperm")])
-
-(define_insn "altivec_vsldoi_16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")
- (match_operand:QI 3 "immediate_operand" "i")] 166))]
+(define_insn "altivec_vsldoi_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")]
+ UNSPEC_VLSDOI))]
"TARGET_ALTIVEC"
"vsldoi %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupkhsb"
[(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] 167))]
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSB))]
"TARGET_ALTIVEC"
"vupkhsb %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupkhpx"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 168))]
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHPX))]
"TARGET_ALTIVEC"
"vupkhpx %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupkhsh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 169))]
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSH))]
"TARGET_ALTIVEC"
"vupkhsh %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupklsb"
[(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] 170))]
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSB))]
"TARGET_ALTIVEC"
"vupklsb %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupklpx"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 171))]
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLPX))]
"TARGET_ALTIVEC"
"vupklpx %0,%1"
[(set_attr "type" "vecperm")])
(define_insn "altivec_vupklsh"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 172))]
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSH))]
"TARGET_ALTIVEC"
"vupklsh %0,%1"
[(set_attr "type" "vecperm")])
@@ -1714,42 +1685,12 @@
;; We can get away with generating the opcode on the fly (%3 below)
;; because all the predicates have the same scheduling parameters.
-(define_insn "altivec_predicate_v4si"
- [(set (reg:CC 74)
- (unspec:CC [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")
- (match_operand 3 "any_operand" "")] 173))
- (clobber (match_scratch:V4SI 0 "=v"))]
- "TARGET_ALTIVEC"
- "%3 %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_predicate_v4sf"
- [(set (reg:CC 74)
- (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")
- (match_operand 3 "any_operand" "")] 174))
- (clobber (match_scratch:V4SF 0 "=v"))]
- "TARGET_ALTIVEC"
- "%3 %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_predicate_v8hi"
- [(set (reg:CC 74)
- (unspec:CC [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")
- (match_operand 3 "any_operand" "")] 175))
- (clobber (match_scratch:V8HI 0 "=v"))]
- "TARGET_ALTIVEC"
- "%3 %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_predicate_v16qi"
+(define_insn "altivec_predicate_<mode>"
[(set (reg:CC 74)
- (unspec:CC [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")
- (match_operand 3 "any_operand" "")] 175))
- (clobber (match_scratch:V16QI 0 "=v"))]
+ (unspec:CC [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (clobber (match_scratch:V 0 "=v"))]
"TARGET_ALTIVEC"
"%3 %0,%1,%2"
[(set_attr "type" "veccmp")])
@@ -1757,102 +1698,114 @@
(define_insn "altivec_mtvscr"
[(set (reg:SI 110)
(unspec_volatile:SI
- [(match_operand:V4SI 0 "register_operand" "v")] 186))]
+ [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
"TARGET_ALTIVEC"
"mtvscr %0"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_mfvscr"
[(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec_volatile:V8HI [(reg:SI 110)] 187))]
+ (unspec_volatile:V8HI [(reg:SI 110)] UNSPECV_MFVSCR))]
"TARGET_ALTIVEC"
"mfvscr %0"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dssall"
- [(unspec [(const_int 0)] 188)]
+ [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
"TARGET_ALTIVEC"
"dssall"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dss"
- [(unspec [(match_operand:QI 0 "immediate_operand" "i")] 189)]
+ [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
+ UNSPECV_DSS)]
"TARGET_ALTIVEC"
"dss %0"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dst"
- [(unspec [(match_operand:V4SI 0 "memory_operand" "Q")
+ [(unspec [(match_operand 0 "register_operand" "b")
(match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "immediate_operand" "i")] 190)]
- "TARGET_ALTIVEC"
- "dst %P0,%1,%2"
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dst %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dstt"
- [(unspec [(match_operand:V4SI 0 "memory_operand" "Q")
+ [(unspec [(match_operand 0 "register_operand" "b")
(match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "immediate_operand" "i")] 191)]
- "TARGET_ALTIVEC"
- "dstt %P0,%1,%2"
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstt %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dstst"
- [(unspec [(match_operand:V4SI 0 "memory_operand" "Q")
+ [(unspec [(match_operand 0 "register_operand" "b")
(match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "immediate_operand" "i")] 192)]
- "TARGET_ALTIVEC"
- "dstst %P0,%1,%2"
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstst %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_dststt"
- [(unspec [(match_operand:V4SI 0 "memory_operand" "Q")
+ [(unspec [(match_operand 0 "register_operand" "b")
(match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "immediate_operand" "i")] 193)]
- "TARGET_ALTIVEC"
- "dststt %P0,%1,%2"
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dststt %0,%1,%2"
[(set_attr "type" "vecsimple")])
(define_insn "altivec_lvsl"
[(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand 1 "memory_operand" "m")] 194))]
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSL))]
"TARGET_ALTIVEC"
"lvsl %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvsr"
[(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand 1 "memory_operand" "m")] 195))]
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSR))]
"TARGET_ALTIVEC"
"lvsr %0,%y1"
[(set_attr "type" "vecload")])
+(define_expand "build_vector_mask_for_load"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx addr;
+ rtx temp;
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ addr = XEXP (operands[1], 0);
+ temp = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ emit_insn (gen_altivec_lvsr (operands[0],
+ replace_equiv_address (operands[1], temp)));
+ DONE;
+}")
+
;; Parallel some of the LVE* and STV*'s with unspecs because some have
;; identical rtl but different instructions, and gcc gets confused.
-(define_insn "altivec_lvebx"
+(define_insn "altivec_lve<VI_char>x"
[(parallel
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (match_operand:V16QI 1 "memory_operand" "m"))
- (unspec [(const_int 0)] 196)])]
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (match_operand:VI 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
"TARGET_ALTIVEC"
- "lvebx %0,%y1"
+ "lve<VI_char>x %0,%y1"
[(set_attr "type" "vecload")])
-(define_insn "altivec_lvehx"
+(define_insn "*altivec_lvesfx"
[(parallel
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (match_operand:V8HI 1 "memory_operand" "m"))
- (unspec [(const_int 0)] 197)])]
- "TARGET_ALTIVEC"
- "lvehx %0,%y1"
- [(set_attr "type" "vecload")])
-
-(define_insn "altivec_lvewx"
- [(parallel
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (match_operand:V4SI 1 "memory_operand" "m"))
- (unspec [(const_int 0)] 198)])]
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (match_operand:V4SF 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
"TARGET_ALTIVEC"
"lvewx %0,%y1"
[(set_attr "type" "vecload")])
@@ -1860,130 +1813,412 @@
(define_insn "altivec_lvxl"
[(parallel
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (match_operand:V4SI 1 "memory_operand" "m"))
- (unspec [(const_int 0)] 213)])]
+ (match_operand:V4SI 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
"TARGET_ALTIVEC"
"lvxl %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (match_operand:V4SI 1 "memory_operand" "m"))]
+ (match_operand:V4SI 1 "memory_operand" "Z"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_stvx"
[(parallel
- [(set (match_operand:V4SI 0 "memory_operand" "=m")
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
(match_operand:V4SI 1 "register_operand" "v"))
- (unspec [(const_int 0)] 201)])]
+ (unspec [(const_int 0)] UNSPEC_STVX)])]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "altivec_stvxl"
[(parallel
- [(set (match_operand:V4SI 0 "memory_operand" "=m")
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
(match_operand:V4SI 1 "register_operand" "v"))
- (unspec [(const_int 0)] 202)])]
+ (unspec [(const_int 0)] UNSPEC_STVXL)])]
"TARGET_ALTIVEC"
"stvxl %1,%y0"
[(set_attr "type" "vecstore")])
-(define_insn "altivec_stvebx"
+(define_insn "altivec_stve<VI_char>x"
[(parallel
- [(set (match_operand:V16QI 0 "memory_operand" "=m")
- (match_operand:V16QI 1 "register_operand" "v"))
- (unspec [(const_int 0)] 203)])]
+ [(set (match_operand:VI 0 "memory_operand" "=Z")
+ (match_operand:VI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
"TARGET_ALTIVEC"
- "stvebx %1,%y0"
+ "stve<VI_char>x %1,%y0"
[(set_attr "type" "vecstore")])
-(define_insn "altivec_stvehx"
+(define_insn "*altivec_stvesfx"
[(parallel
- [(set (match_operand:V8HI 0 "memory_operand" "=m")
- (match_operand:V8HI 1 "register_operand" "v"))
- (unspec [(const_int 0)] 204)])]
+ [(set (match_operand:V4SF 0 "memory_operand" "=Z")
+ (match_operand:V4SF 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
"TARGET_ALTIVEC"
- "stvehx %1,%y0"
+ "stvewx %1,%y0"
[(set_attr "type" "vecstore")])
-(define_insn "altivec_stvewx"
- [(parallel
- [(set (match_operand:V4SI 0 "memory_operand" "=m")
- (match_operand:V4SI 1 "register_operand" "v"))
- (unspec [(const_int 0)] 205)])]
+(define_expand "vec_init<mode>"
+ [(match_operand:V 0 "register_operand" "")
+ (match_operand 1 "" "")]
"TARGET_ALTIVEC"
- "stvewx %1,%y0"
- [(set_attr "type" "vecstore")])
+{
+ rs6000_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
-(define_insn "absv16qi2"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (abs:V16QI (match_operand:V16QI 1 "register_operand" "v")))
- (clobber (match_scratch:V16QI 2 "=&v"))
- (clobber (match_scratch:V16QI 3 "=&v"))]
+(define_expand "vec_setv4si"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsububm %3,%2,%1\;vmaxsb %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
-(define_insn "absv8hi2"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (abs:V8HI (match_operand:V8HI 1 "register_operand" "v")))
- (clobber (match_scratch:V8HI 2 "=&v"))
- (clobber (match_scratch:V8HI 3 "=&v"))]
+(define_expand "vec_setv8hi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsubuhm %3,%2,%1\;vmaxsh %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
-(define_insn "absv4si2"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (abs:V4SI (match_operand:V4SI 1 "register_operand" "v")))
- (clobber (match_scratch:V4SI 2 "=&v"))
- (clobber (match_scratch:V4SI 3 "=&v"))]
+(define_expand "vec_setv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsubuwm %3,%2,%1\;vmaxsw %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
-(define_insn "absv4sf2"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
- (abs:V4SF (match_operand:V4SF 1 "register_operand" "v")))
- (clobber (match_scratch:V4SF 2 "=&v"))
- (clobber (match_scratch:V4SF 3 "=&v"))]
+(define_expand "vec_setv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4si"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv8hi"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv16qi"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubu?m SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "abs<mode>2"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (set (match_dup 3)
+ (minus:VI (match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")))
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
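+
+;; For the V4SI instance, for example, this emits
+;;    vspltisw %2,0
+;;    vsubuwm %3,%2,%1
+;;    vmaxsw %0,%1,%3
+;; i.e. abs(x) = smax(x, 0 - x) using the modulo subtract.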
+
+;; Generate
+;; vspltisw SCRATCH1,-1
+;; vslw SCRATCH2,SCRATCH1,SCRATCH1
+;; vandc %0,%1,SCRATCH2
+(define_expand "absv4sf2"
+ [(set (match_dup 2)
+ (vec_duplicate:V4SI (const_int -1)))
+ (set (match_dup 3)
+ (unspec:V4SI [(match_dup 2) (match_dup 2)] UNSPEC_VSL))
+ (set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (V4SImode);
+ operands[3] = gen_reg_rtx (V4SImode);
+})
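+
+;; This works because vspltisw -1 fills each word with 0xffffffff,
+;; and vslw shifts each word left by the low five bits of the
+;; matching element of the shift operand (31 here), leaving
+;; 0x80000000 in every lane; vandc then clears exactly the sign
+;; bits, which is fabs() for IEEE single precision.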
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubs?s SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "altivec_abss_<mode>"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (parallel [(set (match_dup 3)
+ (unspec:VI [(match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
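+
+;; The saturating subtract (vsubs?s) is the point of abss: negating
+;; the most negative element saturates to the maximum positive value
+;; instead of wrapping back to itself, and the SAT bit in the VSCR
+;; is set, hence the UNSPEC_SET_VSCR set in the parallel above.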
+
+;; Vector shift left in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vslo + vsl.  We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shl_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
"TARGET_ALTIVEC"
- "vspltisw %2,-1\;vslw %3,%2,%2\;vandc %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = bitshift_val >> 3;
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+;; Vector shift right in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vsro + vsr.  We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shr_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = 16 - (bitshift_val >> 3);
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+(define_insn "altivec_vsumsws_nomode"
+ [(set (match_operand 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:VIshort 0 "register_operand" "=v")
+ (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
-(define_insn "altivec_abss_v16qi"
+(define_expand "reduc_uplus_v16qi"
[(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")] 210))
- (clobber (match_scratch:V16QI 2 "=&v"))
- (clobber (match_scratch:V16QI 3 "=&v"))]
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsubsbs %3,%2,%1\;vmaxsb %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
-(define_insn "altivec_abss_v8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")] 211))
- (clobber (match_scratch:V8HI 2 "=&v"))
- (clobber (match_scratch:V8HI 3 "=&v"))]
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4ubs (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
+
+(define_insn "vec_realign_load_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_REALIGN_LOAD))]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsubshs %3,%2,%1\;vmaxsh %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_expand "neg<mode>2"
+ [(use (match_operand:VI 0 "register_operand" ""))
+ (use (match_operand:VI 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero;
-(define_insn "altivec_abss_v4si"
+ vzero = gen_reg_rtx (GET_MODE (operands[0]));
+ emit_insn (gen_altivec_vspltis<VI_char> (vzero, const0_rtx));
+ emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
+
+ DONE;
+}")
+
+(define_expand "udot_prod<mode>"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")] 212))
- (clobber (match_scratch:V4SI 2 "=&v"))
- (clobber (match_scratch:V4SI 3 "=&v"))]
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
"TARGET_ALTIVEC"
- "vspltisb %2,0\;vsubsws %3,%2,%1\;vmaxsw %0,%1,%3"
- [(set_attr "type" "vecsimple")
- (set_attr "length" "12")])
+ "
+{
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "sdot_prodv8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "widen_usum<mode>3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
+
+ emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv16qi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VMSUMM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V16QImode);
+
+ emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv8hi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vspltish (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "negv4sf2"
+ [(use (match_operand:V4SF 0 "register_operand" ""))
+ (use (match_operand:V4SF 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx neg0;
+
+ /* Generate [-0.0, -0.0, -0.0, -0.0]. */
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_altivec_vslw (neg0, neg0, neg0));
+
+ /* XOR */
+ emit_insn (gen_xorv4sf3 (operands[0],
+ gen_lowpart (V4SFmode, neg0), operands[1]));
+
+ DONE;
+}")
diff --git a/contrib/gcc/config/rs6000/beos.h b/contrib/gcc/config/rs6000/beos.h
index a9e88ac..cea5ca8 100644
--- a/contrib/gcc/config/rs6000/beos.h
+++ b/contrib/gcc/config/rs6000/beos.h
@@ -1,5 +1,6 @@
/* Definitions of target machine for GNU compiler, for BeOS.
- Copyright (C) 1997, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1997, 2000, 2001, 2002, 2003, 2005
+ Free Software Foundation, Inc.
Contributed by Fred Fish (fnf@cygnus.com), based on aix41.h
from David Edelsohn (edelsohn@npac.syr.edu).
@@ -17,8 +18,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (BeOS/PowerPC)");
diff --git a/contrib/gcc/config/rs6000/biarch64.h b/contrib/gcc/config/rs6000/biarch64.h
index 3f8addd..c5cb598 100644
--- a/contrib/gcc/config/rs6000/biarch64.h
+++ b/contrib/gcc/config/rs6000/biarch64.h
@@ -15,8 +15,8 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
/* Specify this in a cover file to provide bi-architecture (32/64) support. */
#define RS6000_BI_ARCH 1
diff --git a/contrib/gcc/config/rs6000/constraints.md b/contrib/gcc/config/rs6000/constraints.md
new file mode 100644
index 0000000..a7d4661
--- /dev/null
+++ b/contrib/gcc/config/rs6000/constraints.md
@@ -0,0 +1,162 @@
+;; Constraint definitions for RS6000
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT && TARGET_FPRS
+ ? FLOAT_REGS : NO_REGS"
+ "@internal")
+
+(define_register_constraint "b" "BASE_REGS"
+ "@internal")
+
+(define_register_constraint "h" "SPECIAL_REGS"
+ "@internal")
+
+(define_register_constraint "q" "MQ_REGS"
+ "@internal")
+
+(define_register_constraint "c" "CTR_REGS"
+ "@internal")
+
+(define_register_constraint "l" "LINK_REGS"
+ "@internal")
+
+(define_register_constraint "v" "ALTIVEC_REGS"
+ "@internal")
+
+(define_register_constraint "x" "CR0_REGS"
+ "@internal")
+
+(define_register_constraint "y" "CR_REGS"
+ "@internal")
+
+(define_register_constraint "z" "XER_REGS"
+ "@internal")
+
+;; Integer constraints
+
+(define_constraint "I"
+ "A signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) (ival + 0x8000) < 0x10000")))
+
+(define_constraint "J"
+ "high-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0")))
+
+(define_constraint "K"
+ "low-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (HOST_WIDE_INT) 0xffff)) == 0")))
+
+(define_constraint "L"
+ "signed 16-bit constant shifted left 16 bits"
+ (and (match_code "const_int")
+ (match_test "((ival & 0xffff) == 0
+ && (ival >> 31 == -1 || ival >> 31 == 0))")))
+
+(define_constraint "M"
+ "constant greater than 31"
+ (and (match_code "const_int")
+ (match_test "ival > 31")))
+
+(define_constraint "N"
+ "positive constant that is an exact power of two"
+ (and (match_code "const_int")
+ (match_test "ival > 0 && exact_log2 (ival) >= 0")))
+
+(define_constraint "O"
+ "constant zero"
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "P"
+ "constant whose negation is signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ((- ival) + 0x8000) < 0x10000")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Constant that can be copied into GPR with two insns for DF/DI
+ and one for SF."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode)
+ == (mode == SFmode ? 1 : 2)")))
+
+(define_constraint "H"
+ "DF/DI constant that takes three insns."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode) == 3")))
+
+;; Memory constraints
+
+(define_memory_constraint "Q"
+ "Memory operand that is just an offset from a reg"
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_memory_constraint "Y"
+ "Indexed or word-aligned displacement memory operand"
+ (match_operand 0 "word_offset_memref_operand"))
+
+(define_memory_constraint "Z"
+ "Indexed or indirect memory operand"
+ (match_operand 0 "indexed_or_indirect_operand"))
+
+;; Address constraints
+
+(define_address_constraint "a"
+ "Indexed or indirect address operand"
+ (match_operand 0 "indexed_or_indirect_address"))
+
+(define_constraint "R"
+ "AIX TOC entry"
+ (match_test "legitimate_constant_pool_address_p (op)"))
+
+;; General constraints
+
+(define_constraint "S"
+ "Constant that can be placed into a 64-bit mask operand"
+ (match_operand 0 "mask64_operand"))
+
+(define_constraint "T"
+ "Constant that can be placed into a 32-bit mask operand"
+ (match_operand 0 "mask_operand"))
+
+(define_constraint "U"
+ "V.4 small data reference"
+ (and (match_test "DEFAULT_ABI == ABI_V4")
+ (match_operand 0 "small_data_operand")))
+
+(define_constraint "t"
+ "AND masks that can be performed by two rldic{l,r} insns
+ (but excluding those that could match other constraints of anddi3)"
+ (and (and (and (match_operand 0 "mask64_2_operand")
+ (match_test "(fixed_regs[CR0_REGNO]
+ || !logical_operand (op, DImode))"))
+ (not (match_operand 0 "mask_operand")))
+ (not (match_operand 0 "mask64_operand"))))
+
+(define_constraint "W"
+ "vector constant that does not require memory"
+ (match_operand 0 "easy_vector_constant"))
diff --git a/contrib/gcc/config/rs6000/crtsavres.asm b/contrib/gcc/config/rs6000/crtsavres.asm
index 327048e..498759d 100644
--- a/contrib/gcc/config/rs6000/crtsavres.asm
+++ b/contrib/gcc/config/rs6000/crtsavres.asm
@@ -25,8 +25,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
*
* As a special exception, if you link this library with files
* compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/darwin-asm.h b/contrib/gcc/config/rs6000/darwin-asm.h
new file mode 100644
index 0000000..401b4f8
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-asm.h
@@ -0,0 +1,61 @@
+/* Macro definitions used to support 32/64-bit code in Darwin's
+ * assembly files.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* These are adapted from /usr/include/architecture/ppc.  */
+
+#if defined(__ppc64__)
+#define MODE_CHOICE(x, y) y
+#else
+#define MODE_CHOICE(x, y) x
+#endif
+
+#define cmpg MODE_CHOICE(cmpw, cmpd)
+#define lg MODE_CHOICE(lwz, ld)
+#define stg MODE_CHOICE(stw, std)
+#define lgx MODE_CHOICE(lwzx, ldx)
+#define stgx MODE_CHOICE(stwx, stdx)
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define stgu MODE_CHOICE(stwu, stdu)
+#define lgux MODE_CHOICE(lwzux, ldux)
+#define stgux MODE_CHOICE(stwux, stdux)
+#define lgwa MODE_CHOICE(lwz, lwa)
+
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+#define GPR_BYTES MODE_CHOICE(4,8) /* size of a GPR in bytes */
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* position of saved
+ LR in frame */
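+
+/* Usage sketch (illustrative): a prologue/epilogue that must build
+   in both modes can write
+
+	lg r0,SAVED_LR_OFFSET(r1)
+	mtlr r0
+
+   which assembles as "lwz r0,8(r1)" for 32-bit code and as
+   "ld r0,16(r1)" for 64-bit code.  */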
diff --git a/contrib/gcc/config/rs6000/darwin-fallback.c b/contrib/gcc/config/rs6000/darwin-fallback.c
new file mode 100644
index 0000000..5d3de32
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-fallback.c
@@ -0,0 +1,471 @@
+/* Fallback frame-state unwinder for Darwin.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combined
+ executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "dwarf2.h"
+#include "unwind.h"
+#include "unwind-dw2.h"
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <signal.h>
+
+typedef unsigned long reg_unit;
+
+/* Place in GPRS the parameters to the first 'sc' instruction that would
+ have been executed if we were returning from this CONTEXT, or
+ return false if an unexpected instruction is encountered. */
+
+static bool
+interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context)
+{
+ uint32_t *pc = (uint32_t *)_Unwind_GetIP (context);
+ uint32_t cr;
+ reg_unit lr = (reg_unit) pc;
+ reg_unit ctr = 0;
+ uint32_t *invalid_address = NULL;
+
+ int i;
+
+ for (i = 0; i < 13; i++)
+ gprs[i] = 1;
+ gprs[1] = _Unwind_GetCFA (context);
+ for (; i < 32; i++)
+ gprs[i] = _Unwind_GetGR (context, i);
+ cr = _Unwind_GetGR (context, CR2_REGNO);
+
+ /* For each supported Libc, we have to track the code flow
+ all the way back into the kernel.
+
+ This code is believed to support all released Libc/Libsystem builds since
+ Jaguar 6C115, including all the security updates. To be precise,
+
+ Libc Libsystem Build(s)
+ 262~1 60~37 6C115
+ 262~1 60.2~4 6D52
+ 262~1 61~3 6F21-6F22
+ 262~1 63~24 6G30-6G37
+ 262~1 63~32 6I34-6I35
+ 262~1 63~64 6L29-6L60
+ 262.4.1~1 63~84 6L123-6R172
+
+ 320~1 71~101 7B85-7D28
+ 320~1 71~266 7F54-7F56
+ 320~1 71~288 7F112
+ 320~1 71~289 7F113
+ 320.1.3~1 71.1.1~29 7H60-7H105
+ 320.1.3~1 71.1.1~30 7H110-7H113
+ 320.1.3~1 71.1.1~31 7H114
+
+ That's a big table! It would be insane to try to keep track of
+ every little detail, so we just read the code itself and do what
+ it would do.
+ */
+
+ for (;;)
+ {
+ uint32_t ins = *pc++;
+
+ if ((ins & 0xFC000003) == 0x48000000) /* b instruction */
+ {
+ pc += ((((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000004) / 4;
+ continue;
+ }
+ if ((ins & 0xFC600000) == 0x2C000000) /* cmpwi */
+ {
+ int32_t val1 = (int16_t) ins;
+ int32_t val2 = gprs[ins >> 16 & 0x1F];
+ /* Only beq and bne instructions are supported, so we only
+ need to set the EQ bit. */
+ uint32_t mask = 0xF << ((ins >> 21 & 0x1C) ^ 0x1C);
+ if (val1 == val2)
+ cr |= mask;
+ else
+ cr &= ~mask;
+ continue;
+ }
+ if ((ins & 0xFEC38003) == 0x40820000) /* forwards beq/bne */
+ {
+ if ((cr >> ((ins >> 16 & 0x1F) ^ 0x1F) & 1) == (ins >> 24 & 1))
+ pc += (ins & 0x7FFC) / 4 - 1;
+ continue;
+ }
+ if ((ins & 0xFC0007FF) == 0x7C000378) /* or, including mr */
+ {
+ gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F]
+ | gprs [ins >> 21 & 0x1F]);
+ continue;
+ }
+ if (ins >> 26 == 0x0E) /* addi, including li */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + (int16_t) ins;
+ continue;
+ }
+ if (ins >> 26 == 0x0F) /* addis, including lis */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + ((int16_t) ins << 16);
+ continue;
+ }
+ if (ins >> 26 == 0x20) /* lwz */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x21) /* lwzu */
+ {
+ uint32_t *p = (uint32_t *)(gprs [ins >> 16 & 0x1F] += (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x24) /* stw */
+ /* What we hope this is doing is '--in_sigtramp'. We don't want
+ to actually store to memory, so just make a note of the
+ address and refuse to load from it. */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == NULL || invalid_address != NULL)
+ return false;
+ invalid_address = p;
+ continue;
+ }
+ if (ins >> 26 == 0x2E) /* lmw */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ int i;
+
+ for (i = (ins >> 21 & 0x1F); i < 32; i++)
+ {
+ if (p == invalid_address)
+ return false;
+ gprs[i] = *p++;
+ }
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0803a6) /* mtlr */
+ {
+ lr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0802a6) /* mflr */
+ {
+ gprs [ins >> 21 & 0x1F] = lr;
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0903a6) /* mtctr */
+ {
+ ctr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ /* The PowerPC User's Manual says that bit 11 of the mtcrf
+ instruction is reserved and should be set to zero, but it
+ looks like the Darwin assembler doesn't do that... */
+ if ((ins & 0xFC000FFF) == 0x7c000120) /* mtcrf */
+ {
+ int i;
+ uint32_t mask = 0;
+ for (i = 0; i < 8; i++)
+ mask |= ((-(ins >> (12 + i) & 1)) & 0xF) << 4 * i;
+ cr = (cr & ~mask) | (gprs [ins >> 21 & 0x1F] & mask);
+ continue;
+ }
+ if (ins == 0x429f0005) /* bcl- 20,4*cr7+so,.+4, loads pc into LR */
+ {
+ lr = (reg_unit) pc;
+ continue;
+ }
+ if (ins == 0x4e800420) /* bctr */
+ {
+ pc = (uint32_t *) ctr;
+ continue;
+ }
+ if (ins == 0x44000002) /* sc */
+ return true;
+
+ return false;
+ }
+}
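
Two of the bit tricks in interpret_libc above merit a worked example:
the 'b' decoder sign-extends the 26-bit LI field with the
(x ^ sign) - sign identity (the real code subtracts an extra 4 to
compensate for the pc++ that already consumed the instruction), and
the mtcrf decoder widens each FXM bit into a nibble of mask.  A
standalone C99 sketch, not part of the patch:

#include <stdint.h>
#include <assert.h>

static int32_t b_displacement (uint32_t ins)
{
  /* LI occupies bits 2..25; xor-then-subtract sign-extends it.  */
  return (((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000000;
}

static uint32_t mtcrf_mask (uint32_t ins)
{
  uint32_t mask = 0;
  int i;
  /* -(bit) is all-ones when the FXM bit is set; keep one nibble.  */
  for (i = 0; i < 8; i++)
    mask |= ((-(ins >> (12 + i) & 1)) & 0xF) << 4 * i;
  return mask;
}

int main (void)
{
  assert (b_displacement (0x48000008) == 8);        /* b .+8 */
  assert (b_displacement (0x4BFFFFF8) == -8);       /* b .-8 */
  assert (mtcrf_mask (0x7C0FF120) == 0xFFFFFFFFu);  /* mtcrf 0xFF,r0 */
  return 0;
}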
+
+/* We used to include <ucontext.h> and <mach/thread_status.h>,
+ but they change so much between different Darwin system versions
+ that it's much easier to just write the structures involved here
+ directly. */
+
+/* These defines are from the kernel's bsd/dev/ppc/unix_signal.c. */
+#define UC_TRAD 1
+#define UC_TRAD_VEC 6
+#define UC_TRAD64 20
+#define UC_TRAD64_VEC 25
+#define UC_FLAVOR 30
+#define UC_FLAVOR_VEC 35
+#define UC_FLAVOR64 40
+#define UC_FLAVOR64_VEC 45
+#define UC_DUAL 50
+#define UC_DUAL_VEC 55
+
+struct gcc_ucontext
+{
+ int onstack;
+ sigset_t sigmask;
+ void * stack_sp;
+ size_t stack_sz;
+ int stack_flags;
+ struct gcc_ucontext *link;
+ size_t mcsize;
+ struct gcc_mcontext32 *mcontext;
+};
+
+struct gcc_float_vector_state
+{
+ double fpregs[32];
+ uint32_t fpscr_pad;
+ uint32_t fpscr;
+ uint32_t save_vr[32][4];
+ uint32_t save_vscr[4];
+};
+
+struct gcc_mcontext32 {
+ uint32_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[5];
+ uint32_t srr0;
+ uint32_t srr1;
+ uint32_t gpr[32];
+ uint32_t cr;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t ctr;
+ uint32_t mq;
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+/* These are based on /usr/include/ppc/ucontext.h and
+ /usr/include/mach/ppc/thread_status.h, but rewritten to be more
+ convenient, to compile on Jaguar, and to work around Radar 3712064
+ on Panther, which is that the 'es' field of 'struct mcontext64' has
+ the wrong type (doh!). */
+
+struct gcc_mcontext64 {
+ uint64_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[4];
+ uint64_t srr0;
+ uint64_t srr1;
+ uint32_t gpr[32][2];
+ uint32_t cr;
+ uint32_t xer[2]; /* These are arrays because the original structure has them misaligned. */
+ uint32_t lr[2];
+ uint32_t ctr[2];
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+#define UC_FLAVOR_SIZE \
+ (sizeof (struct gcc_mcontext32) - 33*16)
+
+#define UC_FLAVOR_VEC_SIZE (sizeof (struct gcc_mcontext32))
+
+#define UC_FLAVOR64_SIZE \
+ (sizeof (struct gcc_mcontext64) - 33*16)
+
+#define UC_FLAVOR64_VEC_SIZE (sizeof (struct gcc_mcontext64))
+
+/* Given GPRS as input to a 'sc' instruction, and OLD_CFA, update FS
+ to represent the execution of a signal return; or, if not a signal
+ return, return false. */
+
+static bool
+handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
+ _Unwind_Ptr old_cfa)
+{
+ struct gcc_ucontext *uctx;
+ bool is_64, is_vector;
+ struct gcc_float_vector_state * float_vector_state;
+ _Unwind_Ptr new_cfa;
+ int i;
+ static _Unwind_Ptr return_addr;
+
+ /* Yay! We're in a Libc that we understand, and it's made a
+ system call. It'll be one of two kinds: either a Jaguar-style
+ SYS_sigreturn, or a Panther-style 'syscall' call with 184, which
+ is also SYS_sigreturn. */
+
+ if (gprs[0] == 0x67 /* SYS_SIGRETURN */)
+ {
+ uctx = (struct gcc_ucontext *) gprs[3];
+ is_vector = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR_VEC_SIZE);
+ is_64 = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR64_SIZE);
+ }
+ else if (gprs[0] == 0 && gprs[3] == 184)
+ {
+ int ctxstyle = gprs[5];
+ uctx = (struct gcc_ucontext *) gprs[4];
+ is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC
+ || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC);
+ is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC
+ || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64);
+ }
+ else
+ return false;
+
+#define set_offset(r, addr) \
+ (fs->regs.reg[r].how = REG_SAVED_OFFSET, \
+ fs->regs.reg[r].loc.offset = (_Unwind_Ptr)(addr) - new_cfa)
+
+ /* Restore even the registers that are not call-saved, since they
+     might be in use in the prologue to save other registers;
+     for instance, GPR0 is sometimes used to save LR.  */
+
+ /* Handle the GPRs, and produce the information needed to do the rest. */
+ if (is_64)
+ {
+ /* The context is 64-bit, but it doesn't carry any extra information
+ for us because only the low 32 bits of the registers are
+ call-saved. */
+ struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->mcontext;
+ int i;
+
+ float_vector_state = &m64->fvs;
+
+ new_cfa = m64->gpr[1][1];
+
+ set_offset (CR2_REGNO, &m64->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m64->gpr[i] + 1);
+ set_offset (XER_REGNO, m64->xer + 1);
+ set_offset (LINK_REGISTER_REGNUM, m64->lr + 1);
+ set_offset (COUNT_REGISTER_REGNUM, m64->ctr + 1);
+ if (is_vector)
+ set_offset (VRSAVE_REGNO, &m64->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m64->exception == 3 || m64->exception == 4
+ || m64->exception == 6
+ || (m64->exception == 7 && !(m64->srr1 & 0x10000)))
+ return_addr = m64->srr0 + 4;
+ else
+ return_addr = m64->srr0;
+ }
+ else
+ {
+ struct gcc_mcontext32 *m = uctx->mcontext;
+ int i;
+
+ float_vector_state = &m->fvs;
+
+ new_cfa = m->gpr[1];
+
+ set_offset (CR2_REGNO, &m->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m->gpr + i);
+ set_offset (XER_REGNO, &m->xer);
+ set_offset (LINK_REGISTER_REGNUM, &m->lr);
+ set_offset (COUNT_REGISTER_REGNUM, &m->ctr);
+
+ if (is_vector)
+ set_offset (VRSAVE_REGNO, &m->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m->exception == 3 || m->exception == 4
+ || m->exception == 6
+ || (m->exception == 7 && !(m->srr1 & 0x10000)))
+ return_addr = m->srr0 + 4;
+ else
+ return_addr = m->srr0;
+ }
+
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->cfa_reg = STACK_POINTER_REGNUM;
+  fs->cfa_offset = new_cfa - old_cfa;
+
+ /* The choice of column for the return address is somewhat tricky.
+ Fortunately, the actual choice is private to this file, and
+ the space it's reserved from is the GCC register space, not the
+ DWARF2 numbering. So any free element of the right size is an OK
+ choice. Thus: */
+ fs->retaddr_column = ARG_POINTER_REGNUM;
+ /* FIXME: this should really be done using a DWARF2 location expression,
+ not using a static variable. In fact, this entire file should
+ be implemented in DWARF2 expressions. */
+ set_offset (ARG_POINTER_REGNUM, &return_addr);
+
+ for (i = 0; i < 32; i++)
+ set_offset (32 + i, float_vector_state->fpregs + i);
+ set_offset (SPEFSCR_REGNO, &float_vector_state->fpscr);
+
+ if (is_vector)
+ {
+ for (i = 0; i < 32; i++)
+ set_offset (FIRST_ALTIVEC_REGNO + i, float_vector_state->save_vr + i);
+ set_offset (VSCR_REGNO, float_vector_state->save_vscr);
+ }
+
+ return true;
+}
+
+/* This is also prototyped in rs6000/darwin.h, inside the
+ MD_FALLBACK_FRAME_STATE_FOR macro. */
+extern bool _Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs);
+
+/* Implement the MD_FALLBACK_FRAME_STATE_FOR macro,
+ returning true iff the frame was a sigreturn() frame that we
+ can understand. */
+
+bool
+_Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ reg_unit gprs[32];
+
+ if (!interpret_libc (gprs, context))
+ return false;
+ return handle_syscall (fs, gprs, _Unwind_GetCFA (context));
+}
diff --git a/contrib/gcc/config/rs6000/darwin-fpsave.asm b/contrib/gcc/config/rs6000/darwin-fpsave.asm
new file mode 100644
index 0000000..86d4760
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-fpsave.asm
@@ -0,0 +1,102 @@
+/* This file contains the floating-point save and restore routines.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes.)
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
+
+#include "darwin-asm.h"
+
+.text
+ .align 2
+
+/* saveFP saves R0 -- assumed to be the caller's LR -- to 8/16(R1). */
+
+.private_extern saveFP
+saveFP:
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stg r0,SAVED_LR_OFFSET(r1)
+ blr
+
+/* restFP restores the caller's LR from 8/16(R1). Note that the code for
+ this starts at the offset of F30 restoration, so calling this
+ routine in an attempt to restore only F31 WILL NOT WORK (it would
+ be a stupid thing to do, anyway.) */
+
+.private_extern restFP
+restFP:
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ /* <OFFSET OF F30 RESTORE> restore callers LR */
+ lg r0,SAVED_LR_OFFSET(r1)
+ lfd f30,-16(r1)
+ /* and prepare for return to caller */
+ mtlr r0
+ lfd f31,-8(r1)
+ blr
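
The "one globally visible entry point" rule works because the body is
a straight run of fixed-size instructions: entering (N - 14) * 4 bytes
past saveFP starts the save at fN.  A sketch of that arithmetic
(hypothetical helper, not part of the patch); the vector routines in
darwin-vecsave.asm step by 8 because each save is an li/stvx pair:

#include <assert.h>

/* Byte offset past the entry label at which register FIRST starts
   being handled, given BYTES_PER_OP bytes of code per register.  */
static int entry_offset (int first, int base, int bytes_per_op)
{
  return (first - base) * bytes_per_op;
}

int main (void)
{
  assert (entry_offset (29, 14, 4) == 60); /* "saveFP+60" saves f29..f31 */
  assert (entry_offset (14, 14, 4) == 0);  /* plain saveFP saves f14..f31 */
  assert (entry_offset (27, 20, 8) == 56); /* saveVEC+56 would start at v27 */
  return 0;
}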
diff --git a/contrib/gcc/config/rs6000/darwin-ldouble-format b/contrib/gcc/config/rs6000/darwin-ldouble-format
new file mode 100644
index 0000000..0012a33
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-ldouble-format
@@ -0,0 +1,84 @@
+Long double format
+==================
+
+ Each long double is made up of two IEEE doubles. The value of the
+long double is the sum of the values of the two parts (except for
+-0.0). The most significant part is required to be the value of the
+long double rounded to the nearest double, as specified by IEEE. For
+Inf values, the least significant part is required to be one of +0.0
+or -0.0. No other requirements are made; so, for example, 1.0 may be
+represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a NaN
+is a don't-care.
+
+Classification
+--------------
+
+A long double can represent any value of the form
+ s * 2^e * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, 'e' is between 1022 and -968 inclusive, f_0 is
+1, and f_k for k>0 is 0 or 1. These are the 'normal' long doubles.
+
+A long double can also represent any value of the form
+ s * 2^-968 * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, f_0 is 0, and f_k for k>0 is 0 or 1. These are
+the 'subnormal' long doubles.
+
+There are four long doubles that represent zero, two that represent
++0.0 and two that represent -0.0. The sign of the high part is the
+sign of the long double, and the sign of the low part is ignored.
+
+Likewise, there are four long doubles that represent infinities, two
+for +Inf and two for -Inf.
+
+Each NaN, quiet or signalling, that can be represented as a 'double'
+can be represented as a 'long double'. In fact, there are 2^64
+equivalent representations for each one.
+
+There are certain other valid long doubles where both parts are
+nonzero but the low part represents a value which has a bit set below
+2^(e-105). These, together with the subnormal long doubles, make up
+the denormal long doubles.
+
+Many possible long double bit patterns are not valid long doubles.
+These do not represent any value.
+
+Limits
+------
+
+The maximum representable long double is 2^1024-2^918. The smallest
+*normal* positive long double is 2^-968. The smallest denormalised
+positive long double is 2^-1074 (this is the same as for 'double').
+
+Conversions
+-----------
+
+A double can be converted to a long double by adding a zero low part.
+
+A long double can be converted to a double by removing the low part.
+
+Comparisons
+-----------
+
+Two long doubles can be compared by comparing the high parts, and if
+those compare equal, comparing the low parts.
+
+Arithmetic
+----------
+
+The unary negate operation negates the low and high parts.
+
+An absolute or absolute-negate operation must be done by comparing
+against zero and negating if necessary.
+
+Addition and subtraction are performed using library routines. They
+are not at present performed perfectly accurately; the result produced
+will be within 1ulp of the range generated by adding or subtracting
+1ulp from the input values, where a 'ulp' is 2^(e-106) given the
+exponent 'e'. In the presence of cancellation, this may be
+arbitrarily inaccurate. Subtraction is done by negation and addition.
+
+Multiplication is also performed using a library routine. Its result
+will be within 2ulp of the correct result.
+
+Division is also performed using a library routine. Its result will
+be within 3ulp of the correct result.
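
The conversion and comparison rules above are short enough to state in
C.  This is an illustrative sketch using a hypothetical hi/lo pair
struct in place of the real long double type:

#include <stdbool.h>
#include <assert.h>

struct dd { double hi, lo; };  /* hi = value rounded to nearest double */

/* Widening appends a zero low part; narrowing drops the low part.  */
static struct dd dd_from_double (double d) { return (struct dd) { d, 0.0 }; }
static double dd_to_double (struct dd x) { return x.hi; }

/* Compare high parts first; only when they are equal do the low
   parts decide, per "Comparisons" above.  */
static bool dd_lt (struct dd x, struct dd y)
{
  return x.hi != y.hi ? x.hi < y.hi : x.lo < y.lo;
}

int main (void)
{
  struct dd one = dd_from_double (1.0);
  struct dd one_plus = { 1.0, 0x1p-60 };    /* 1 + 2^-60: not a double */
  assert (dd_lt (one, one_plus));
  assert (dd_to_double (one_plus) == 1.0);  /* narrowing drops the tail */
  return 0;
}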
diff --git a/contrib/gcc/config/rs6000/darwin-ldouble.c b/contrib/gcc/config/rs6000/darwin-ldouble.c
index 210f2d6..c30a98c 100644
--- a/contrib/gcc/config/rs6000/darwin-ldouble.c
+++ b/contrib/gcc/config/rs6000/darwin-ldouble.c
@@ -1,5 +1,6 @@
/* 128-bit long double support routines for Darwin.
- Copyright (C) 1993, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1993, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of GCC.
@@ -24,18 +25,18 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* Implementations of floating-point long double basic arithmetic
functions called by the IBM C compiler when generating code for
PowerPC platforms. In particular, the following functions are
- implemented: _xlqadd, _xlqsub, _xlqmul, and _xlqdiv. Double-double
- algorithms are based on the paper "Doubled-Precision IEEE Standard
- 754 Floating-Point Arithmetic" by W. Kahan, February 26, 1987. An
- alternative published reference is "Software for Doubled-Precision
- Floating-Point Computations", by Seppo Linnainmaa, ACM TOMS vol 7
- no 3, September 1961, pages 272-283. */
+ implemented: __gcc_qadd, __gcc_qsub, __gcc_qmul, and __gcc_qdiv.
+ Double-double algorithms are based on the paper "Doubled-Precision
+ IEEE Standard 754 Floating-Point Arithmetic" by W. Kahan, February 26,
+ 1987. An alternative published reference is "Software for
+ Doubled-Precision Floating-Point Computations", by Seppo Linnainmaa,
+ ACM TOMS vol 7 no 3, September 1981, pages 272-283. */
/* Each long double is made up of two IEEE doubles. The value of the
long double is the sum of the values of the two parts. The most
@@ -48,36 +49,48 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
This code currently assumes big-endian. */
-#if !_SOFT_FLOAT && (defined (__MACH__) || defined (__powerpc64__) || defined (_AIX))
+#if ((!defined (__NO_FPRS__) || defined (_SOFT_FLOAT)) \
+ && !defined (__LITTLE_ENDIAN__) \
+ && (defined (__MACH__) || defined (__powerpc__) || defined (_AIX)))
#define fabs(x) __builtin_fabs(x)
+#define isless(x, y) __builtin_isless (x, y)
+#define inf() __builtin_inf()
#define unlikely(x) __builtin_expect ((x), 0)
+#define nonfinite(a) unlikely (! isless (fabs (a), inf ()))
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+
/* All these routines actually take two long doubles as parameters,
but GCC currently generates poor code when a union is used to turn
a long double into a pair of doubles. */
-extern long double __gcc_qadd (double, double, double, double);
-extern long double __gcc_qsub (double, double, double, double);
-extern long double __gcc_qmul (double, double, double, double);
-extern long double __gcc_qdiv (double, double, double, double);
+long double __gcc_qadd (double, double, double, double);
+long double __gcc_qsub (double, double, double, double);
+long double __gcc_qmul (double, double, double, double);
+long double __gcc_qdiv (double, double, double, double);
-#if defined __ELF__ && defined IN_LIBGCC2_S
-/* Provide definitions of the old symbol names to statisfy apps and
+#if defined __ELF__ && defined SHARED \
+ && (defined __powerpc64__ || !(defined __linux__ || defined __gnu_hurd__))
+/* Provide definitions of the old symbol names to satisfy apps and
shared libs built against an older libgcc. To access the _xlq
symbols an explicit version reference is needed, so these won't
satisfy an unadorned reference like _xlqadd. If dot symbols are
not needed, the assembler will remove the aliases from the symbol
table. */
__asm__ (".symver __gcc_qadd,_xlqadd@GCC_3.4\n\t"
- ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t"
- ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t"
- ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t"
- ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t"
- ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t"
- ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t"
- ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4");
+ ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t"
+ ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t"
+ ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t"
+ ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t"
+ ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t"
+ ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t"
+ ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4");
#endif
typedef union
@@ -86,66 +99,44 @@ typedef union
double dval[2];
} longDblUnion;
-static const double FPKINF = 1.0/0.0;
-
/* Add two 'long double' values and return the result. */
long double
-__gcc_qadd (double a, double b, double c, double d)
+__gcc_qadd (double a, double aa, double c, double cc)
{
- longDblUnion z;
- double t, tau, u, FPR_zero, FPR_PosInf;
-
- FPR_zero = 0.0;
- FPR_PosInf = FPKINF;
-
- if (unlikely (a != a) || unlikely (c != c))
- return a + c; /* NaN result. */
+ longDblUnion x;
+ double z, q, zz, xh;
- /* Ordered operands are arranged in order of their magnitudes. */
+ z = a + c;
- /* Switch inputs if |(c,d)| > |(a,b)|. */
- if (fabs (c) > fabs (a))
+ if (nonfinite (z))
{
- t = a;
- tau = b;
- a = c;
- b = d;
- c = t;
- d = tau;
+ z = cc + aa + c + a;
+ if (nonfinite (z))
+ return z;
+ x.dval[0] = z; /* Will always be DBL_MAX. */
+ zz = aa + cc;
+ if (fabs(a) > fabs(c))
+ x.dval[1] = a - z + c + zz;
+ else
+ x.dval[1] = c - z + a + zz;
}
-
- /* b <- second largest magnitude double. */
- if (fabs (c) > fabs (b))
- {
- t = b;
- b = c;
- c = t;
- }
-
- /* Thanks to commutivity, sum is invariant w.r.t. the next
- conditional exchange. */
- tau = d + c;
-
- /* Order the smallest magnitude doubles. */
- if (fabs (d) > fabs (c))
+ else
{
- t = c;
- c = d;
- d = t;
- }
+ q = a - z;
+ zz = q + c + (a - (q + z)) + aa + cc;
- t = (tau + b) + a; /* Sum values in ascending magnitude order. */
+ /* Keep -0 result. */
+ if (zz == 0.0)
+ return z;
- /* Infinite or zero result. */
- if (unlikely (t == FPR_zero) || unlikely (fabs (t) == FPR_PosInf))
- return t;
+ xh = z + zz;
+ if (nonfinite (xh))
+ return xh;
- /* Usual case. */
- tau = (((a-t) + b) + c) + d;
- u = t + tau;
- z.dval[0] = u; /* Final fixup for long double result. */
- z.dval[1] = (t - u) + tau;
- return z.ldval;
+ x.dval[0] = xh;
+ x.dval[1] = z - xh + zz;
+ }
+ return x.ldval;
}
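
The q/zz lines in the finite branch above are a two-sum error
recovery: with z = a + c rounded, q + c + (a - (q + z)) reconstructs
the rounding error of the addition, and aa + cc then folds in the low
parts.  A minimal sketch of just the error-recovery step (assumes
strict IEEE double evaluation with no excess precision):

#include <assert.h>

static double add_err (double a, double c)
{
  double z = a + c;
  double q = a - z;
  return q + c + (a - (q + z));  /* rounding error of a + c */
}

int main (void)
{
  /* 1.0 + 2^-60 rounds to 1.0; the error term recovers 2^-60.  */
  assert (add_err (1.0, 0x1p-60) == 0x1p-60);
  return 0;
}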
long double
@@ -154,32 +145,38 @@ __gcc_qsub (double a, double b, double c, double d)
return __gcc_qadd (a, b, -c, -d);
}
+#ifdef _SOFT_FLOAT
+static double fmsub (double, double, double);
+#endif
+
long double
__gcc_qmul (double a, double b, double c, double d)
{
longDblUnion z;
- double t, tau, u, v, w, FPR_zero, FPR_PosInf;
+ double t, tau, u, v, w;
- FPR_zero = 0.0;
- FPR_PosInf = FPKINF;
-
t = a * c; /* Highest order double term. */
- if (unlikely (t != t) || unlikely (t == FPR_zero)
- || unlikely (fabs (t) == FPR_PosInf))
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
return t;
- /* Finite nonzero result requires summing of terms of two highest
- orders. */
+ /* Sum terms of two highest orders. */
- /* Use fused multiply-add to get low part of a * c. */
+ /* Use fused multiply-add to get low part of a * c. */
+#ifndef _SOFT_FLOAT
asm ("fmsub %0,%1,%2,%3" : "=f"(tau) : "f"(a), "f"(c), "f"(t));
+#else
+ tau = fmsub (a, c, t);
+#endif
v = a*d;
w = b*c;
tau += v + w; /* Add in other second-order terms. */
u = t + tau;
/* Construct long double result. */
+ if (nonfinite (u))
+ return u;
z.dval[0] = u;
z.dval[1] = (t - u) + tau;
return z.ldval;
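
The fmsub above is the FMA form of two-product: with t = a * c already
rounded, a fused a*c - t is computed exactly and yields the low half
of the product.  A hedged sketch using C99 fma() from <math.h> (link
with -lm; assumes the platform fma is genuinely fused):

#include <math.h>
#include <assert.h>

static double mul_err (double a, double c)
{
  double t = a * c;
  return fma (a, c, -t);  /* exact: a*c - round(a*c) */
}

int main (void)
{
  /* (1 + 2^-30)^2 = 1 + 2^-29 + 2^-60; the 2^-60 tail is the error.  */
  double x = 1.0 + 0x1p-30;
  assert (mul_err (x, x) == 0x1p-60);
  return 0;
}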
@@ -189,34 +186,253 @@ long double
__gcc_qdiv (double a, double b, double c, double d)
{
longDblUnion z;
- double s, sigma, t, tau, u, v, w, FPR_zero, FPR_PosInf;
-
- FPR_zero = 0.0;
- FPR_PosInf = FPKINF;
+ double s, sigma, t, tau, u, v, w;
t = a / c; /* highest order double term */
- if (unlikely (t != t) || unlikely (t == FPR_zero)
- || unlikely (fabs (t) == FPR_PosInf))
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
return t;
/* Finite nonzero result requires corrections to the highest order term. */
- s = c * t; /* (s,sigma) = c*t exactly. */
+ s = c * t; /* (s,sigma) = c*t exactly. */
w = -(-b + d * t); /* Written to get fnmsub for speed, but not
numerically necessary. */
/* Use fused multiply-add to get low part of c * t. */
+#ifndef _SOFT_FLOAT
asm ("fmsub %0,%1,%2,%3" : "=f"(sigma) : "f"(c), "f"(t), "f"(s));
+#else
+ sigma = fmsub (c, t, s);
+#endif
v = a - s;
- tau = ((v-sigma)+w)/c; /* Correction to t. */
+ tau = ((v-sigma)+w)/c; /* Correction to t. */
u = t + tau;
- /* Construct long double result. */
+ /* Construct long double result. */
+ if (nonfinite (u))
+ return u;
z.dval[0] = u;
z.dval[1] = (t - u) + tau;
return z.ldval;
}
+#if defined (_SOFT_FLOAT) && defined (__LONG_DOUBLE_128__)
+
+long double __gcc_qneg (double, double);
+int __gcc_qeq (double, double, double, double);
+int __gcc_qne (double, double, double, double);
+int __gcc_qge (double, double, double, double);
+int __gcc_qle (double, double, double, double);
+int __gcc_qunord (double, double, double, double);
+long double __gcc_stoq (float);
+long double __gcc_dtoq (double);
+float __gcc_qtos (double, double);
+double __gcc_qtod (double, double);
+int __gcc_qtoi (double, double);
+unsigned int __gcc_qtou (double, double);
+long double __gcc_itoq (int);
+long double __gcc_utoq (unsigned int);
+
+extern int __eqdf2 (double, double);
+extern int __ledf2 (double, double);
+extern int __gedf2 (double, double);
+extern int __unorddf2 (double, double);
+
+/* Negate 'long double' value and return the result. */
+long double
+__gcc_qneg (double a, double aa)
+{
+ longDblUnion x;
+
+ x.dval[0] = -a;
+ x.dval[1] = -aa;
+ return x.ldval;
+}
+
+/* Compare two 'long double' values for equality. */
+int
+__gcc_qeq (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __eqdf2 (aa, cc);
+ return 1;
+}
+
+strong_alias (__gcc_qeq, __gcc_qne);
+
+/* Compare two 'long double' values for less than or equal. */
+int
+__gcc_qle (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __ledf2 (aa, cc);
+ return __ledf2 (a, c);
+}
+
+strong_alias (__gcc_qle, __gcc_qlt);
+
+/* Compare two 'long double' values for greater than or equal. */
+int
+__gcc_qge (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __gedf2 (aa, cc);
+ return __gedf2 (a, c);
+}
+
+strong_alias (__gcc_qge, __gcc_qgt);
+
+/* Compare two 'long double' values for unordered. */
+int
+__gcc_qunord (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __unorddf2 (aa, cc);
+ return __unorddf2 (a, c);
+}
+
+/* Convert single to long double. */
+long double
+__gcc_stoq (float a)
+{
+ longDblUnion x;
+
+ x.dval[0] = (double) a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert double to long double. */
+long double
+__gcc_dtoq (double a)
+{
+ longDblUnion x;
+
+ x.dval[0] = a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert long double to single. */
+float
+__gcc_qtos (double a, double aa __attribute__ ((__unused__)))
+{
+ return (float) a;
+}
+
+/* Convert long double to double. */
+double
+__gcc_qtod (double a, double aa __attribute__ ((__unused__)))
+{
+ return a;
+}
+
+/* Convert long double to int. */
+int
+__gcc_qtoi (double a, double aa)
+{
+ double z = a + aa;
+ return (int) z;
+}
+
+/* Convert long double to unsigned int. */
+unsigned int
+__gcc_qtou (double a, double aa)
+{
+ double z = a + aa;
+ return (unsigned int) z;
+}
+
+/* Convert int to long double. */
+long double
+__gcc_itoq (int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+/* Convert unsigned int to long double. */
+long double
+__gcc_utoq (unsigned int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+#include "config/soft-fp/quad.h"
+
+/* Compute floating point multiply-subtract with higher (quad) precision. */
+static double
+fmsub (double a, double b, double c)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ FP_DECL_D(B);
+ FP_DECL_D(C);
+ FP_DECL_Q(X);
+ FP_DECL_Q(Y);
+ FP_DECL_Q(Z);
+ FP_DECL_Q(U);
+ FP_DECL_Q(V);
+ FP_DECL_D(R);
+ double r;
+ long double u, v, x, y, z;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_D (A, a);
+ FP_UNPACK_RAW_D (B, b);
+ FP_UNPACK_RAW_D (C, c);
+
+ /* Extend double to quad. */
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_EXTEND(Q,D,4,2,X,A);
+ FP_EXTEND(Q,D,4,2,Y,B);
+ FP_EXTEND(Q,D,4,2,Z,C);
+#else
+ FP_EXTEND(Q,D,2,1,X,A);
+ FP_EXTEND(Q,D,2,1,Y,B);
+ FP_EXTEND(Q,D,2,1,Z,C);
+#endif
+ FP_PACK_RAW_Q(x,X);
+ FP_PACK_RAW_Q(y,Y);
+ FP_PACK_RAW_Q(z,Z);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Multiply. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q(X,x);
+ FP_UNPACK_Q(Y,y);
+ FP_MUL_Q(U,X,Y);
+ FP_PACK_Q(u,U);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Subtract. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(U,u);
+ FP_UNPACK_SEMIRAW_Q(Z,z);
+ FP_SUB_Q(V,U,Z);
+ FP_PACK_SEMIRAW_Q(v,V);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Truncate quad to double. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(V,v);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_TRUNC(D,Q,2,4,R,V);
+#else
+ FP_TRUNC(D,Q,1,2,R,V);
+#endif
+ FP_PACK_SEMIRAW_D(r,R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+#endif
+
#endif
diff --git a/contrib/gcc/config/rs6000/darwin-libgcc.10.4.ver b/contrib/gcc/config/rs6000/darwin-libgcc.10.4.ver
new file mode 100644
index 0000000..019218d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-libgcc.10.4.ver
@@ -0,0 +1,76 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdi3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldi3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/contrib/gcc/config/rs6000/darwin-libgcc.10.5.ver b/contrib/gcc/config/rs6000/darwin-libgcc.10.5.ver
new file mode 100644
index 0000000..7e0dd52
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-libgcc.10.5.ver
@@ -0,0 +1,89 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divtc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___floatundidf
+___floatundisf
+___floatunditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___multc3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powitf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/contrib/gcc/config/rs6000/darwin-tramp.asm b/contrib/gcc/config/rs6000/darwin-tramp.asm
index 22ce80a..62522b9 100644
--- a/contrib/gcc/config/rs6000/darwin-tramp.asm
+++ b/contrib/gcc/config/rs6000/darwin-tramp.asm
@@ -1,6 +1,6 @@
/* Special support for trampolines
*
- * Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1996, 1997, 2000, 2004, 2005 Free Software Foundation, Inc.
* Written By Michael Meissner
*
* This file is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
*
* As a special exception, if you link this library with files
* compiled with GCC to produce an executable, this does not cause the
@@ -33,22 +33,24 @@
* executable file might be covered by the GNU General Public License.
*/
+#include "darwin-asm.h"
+
/* Set up trampolines. */
.text
- .align 2
+ .align LOG2_GPR_BYTES
Ltrampoline_initial:
mflr r0
bl 1f
Lfunc = .-Ltrampoline_initial
- .long 0 /* will be replaced with function address */
+ .g_long 0 /* will be replaced with function address */
Lchain = .-Ltrampoline_initial
- .long 0 /* will be replaced with static chain */
+ .g_long 0 /* will be replaced with static chain */
1: mflr r11
- lwz r12,0(r11) /* function address */
+ lg r12,0(r11) /* function address */
mtlr r0
mtctr r12
- lwz r11,4(r11) /* static chain */
+ lg r11,GPR_BYTES(r11) /* static chain */
bctr
trampoline_size = .-Ltrampoline_initial
@@ -65,12 +67,12 @@ ___trampoline_setup:
LCF0:
mflr r11
addis r7,r11,ha16(LTRAMP-LCF0)
- lwz r7,lo16(LTRAMP-LCF0)(r7)
+ lg r7,lo16(LTRAMP-LCF0)(r7)
subi r7,r7,4
li r8,trampoline_size /* verify trampoline big enough */
- cmpw cr1,r8,r4
- srwi r4,r4,2 /* # words to move */
- addi r9,r3,-4 /* adjust pointer for lwzu */
+ cmpg cr1,r8,r4
+ srwi r4,r4,2 /* # words to move (insns always 4-byte) */
+ addi r9,r3,-4 /* adjust pointer for lgu */
mtctr r4
blt cr1,Labort
@@ -83,8 +85,8 @@ Lmove:
bdnz Lmove
/* Store correct function and static chain */
- stw r5,Lfunc(r3)
- stw r6,Lchain(r3)
+ stg r5,Lfunc(r3)
+ stg r6,Lchain(r3)
/* Now flush both caches */
mtctr r4
@@ -94,16 +96,19 @@ Lcache:
addi r3,r3,4
bdnz Lcache
- /* Finally synchronize things & return */
+ /* Ensure cache-flushing has finished. */
sync
isync
- blr
+
+ /* Make stack writeable. */
+ b ___enable_execute_stack
Labort:
#ifdef __DYNAMIC__
bl L_abort$stub
.data
-.picsymbol_stub
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 2
L_abort$stub:
.indirect_symbol _abort
mflr r0
@@ -112,20 +117,19 @@ L0$_abort:
mflr r11
addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
mtlr r0
- lwz r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
+ lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
mtctr r12
- addi r11,r11,lo16(L_abort$lazy_ptr-L0$_abort)
bctr
.data
.lazy_symbol_pointer
L_abort$lazy_ptr:
.indirect_symbol _abort
- .long dyld_stub_binding_helper
+ .g_long dyld_stub_binding_helper
#else
bl _abort
#endif
.data
- .align 2
+ .align LOG2_GPR_BYTES
LTRAMP:
- .long Ltrampoline_initial
+ .g_long Ltrampoline_initial
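
With this change the Lfunc/Lchain slots are ".g_long" -- 4 bytes on
ppc, 8 on ppc64 -- which is why the static chain is now loaded at
offset GPR_BYTES instead of a hard-coded 4.  A hypothetical C view of
the two patched slots (illustration only, not part of the patch):

#include <stdint.h>

struct tramp_slots           /* mirrors Lfunc/Lchain in the template */
{
  uintptr_t func;            /* replaced with the function address */
  uintptr_t chain;           /* replaced with the static chain */
};

/* offsetof (struct tramp_slots, chain) is 4 on ppc and 8 on ppc64,
   matching the lg r11,GPR_BYTES(r11) load above.  */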
diff --git a/contrib/gcc/config/rs6000/darwin-unwind.h b/contrib/gcc/config/rs6000/darwin-unwind.h
new file mode 100644
index 0000000..9218c5a
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-unwind.h
@@ -0,0 +1,35 @@
+/* DWARF2 EH unwinding support for Darwin.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combined
+ executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+extern bool _Unwind_fallback_frame_state_for
+ (struct _Unwind_Context *context, _Unwind_FrameState *fs);
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS) \
+ (_Unwind_fallback_frame_state_for (CONTEXT, FS) \
+ ? _URC_NO_REASON : _URC_END_OF_STACK)
diff --git a/contrib/gcc/config/rs6000/darwin-vecsave.asm b/contrib/gcc/config/rs6000/darwin-vecsave.asm
new file mode 100644
index 0000000..693879f
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-vecsave.asm
@@ -0,0 +1,165 @@
+/* This file contains the vector save and restore routines.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* Vector save/restore routines for Darwin. Note that each vector
+   save/restore requires 2 instructions (8 bytes).
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+   required (8 bytes). */
+
+ .machine ppc7400
+.text
+ .align 2
+
+.private_extern saveVEC
+saveVEC:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ blr
+
+.private_extern restVEC
+restVEC:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ blr
+
+/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
+
+.private_extern saveVEC_vr11
+saveVEC_vr11:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ mfspr r11,VRsave
+ blr
+
+/* As restVEC, but the original VRsave value is passed in R10. */
+
+.private_extern restVEC_vr10
+restVEC_vr10:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ /* restore VRsave from R10. */
+ mtspr VRsave,r10
+ blr
diff --git a/contrib/gcc/config/rs6000/darwin-world.asm b/contrib/gcc/config/rs6000/darwin-world.asm
new file mode 100644
index 0000000..7ff51b5
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-world.asm
@@ -0,0 +1,269 @@
+/* This file contains the exception-handling save_world and
+ * restore_world routines, which need to do a run-time check to see if
+ * they should save and restore the vector registers.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+ .machine ppc7400
+.data
+ .align 2
+
+#ifdef __DYNAMIC__
+
+.non_lazy_symbol_pointer
+L_has_vec$non_lazy_ptr:
+ .indirect_symbol __cpu_has_altivec
+#ifdef __ppc64__
+ .quad 0
+#else
+ .long 0
+#endif
+
+#else
+
+/* For static, "pretend" we have a non-lazy-pointer. */
+
+L_has_vec$non_lazy_ptr:
+ .long __cpu_has_altivec
+
+#endif
+
+
+.text
+ .align 2
+
+/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
+ (assuming you have a CPU with vector registers; we use a global var
+ provided by the System Framework to determine this.)
+
+   SAVE_WORLD takes R0 (the caller's caller's return address) and R11
+   (the stack frame size) as parameters. It returns VRsave in R0 if
+   we're on a CPU with vector regs.
+
+ With gcc3, we now need to save and restore CR as well, since gcc3's
+ scheduled prologs can cause comparisons to be moved before calls to
+ save_world!
+
+ USES: R0 R11 R12 */
+
+.private_extern save_world
+save_world:
+ stw r0,8(r1)
+ mflr r0
+ bcl 20,31,Ls$pb
+Ls$pb: mflr r12
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
+ mtlr r0
+ lwz r12,0(r12)
+ /* grab CR */
+ mfcr r0
+ /* test HAS_VEC */
+ cmpwi r12,0
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stmw r13,-220(r1)
+ /* stash CR */
+ stw r0,4(r1)
+ /* set R12 pointing at Vector Reg save area */
+ addi r12,r1,-224
+ /* allocate stack frame */
+ stwux r1,r1,r11
+ /* ...but return if HAS_VEC is zero */
+ bne+ L$saveVMX
+ /* Not forgetting to restore CR. */
+ mtcr r0
+ blr
+
+L$saveVMX:
+ /* We're saving Vector regs too. */
+ /* Restore CR from R0. No More Branches! */
+ mtcr r0
+
+ /* We should really use VRSAVE to figure out which vector regs
+ we actually need to save and restore. Some other time :-/ */
+
+ li r11,-192
+ stvx v20,r11,r12
+ li r11,-176
+ stvx v21,r11,r12
+ li r11,-160
+ stvx v22,r11,r12
+ li r11,-144
+ stvx v23,r11,r12
+ li r11,-128
+ stvx v24,r11,r12
+ li r11,-112
+ stvx v25,r11,r12
+ li r11,-96
+ stvx v26,r11,r12
+ li r11,-80
+ stvx v27,r11,r12
+ li r11,-64
+ stvx v28,r11,r12
+ li r11,-48
+ stvx v29,r11,r12
+ li r11,-32
+ stvx v30,r11,r12
+ mfspr r0,VRsave
+ li r11,-16
+ stvx v31,r11,r12
+ /* VRsave lives at -224(R1) */
+ stw r0,0(r12)
+ blr
+
+
+/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
+   R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
+
+ USES: R0 R10 R11 R12 and R7 R8
+ RETURNS: C++ EH Data registers (R3 - R6.)
+
+ We now set up R7/R8 and jump to rest_world_eh_r7r8.
+
+ rest_world doesn't use the R10 stack adjust parameter, nor does it
+ pick up the R3-R6 exception handling stuff. */
+
+.private_extern rest_world
+rest_world:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ li r7, 0
+ lwz r8, 8(r11)
+ li r10, 0
+ b rest_world_eh_r7r8
+
+.private_extern eh_rest_world_r10
+eh_rest_world_r10:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ mr r7,r10
+ lwz r8, 8(r11)
+ /* pickup the C++ EH data regs (R3 - R6.) */
+ lwz r6,-420(r11)
+ lwz r5,-424(r11)
+ lwz r4,-428(r11)
+ lwz r3,-432(r11)
+
+ b rest_world_eh_r7r8
+
+/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing
+ the exception-handling epilog. R7 contains the offset to add to
+ the SP, and R8 contains the 'real' return address.
+
+ USES: R0 R11 R12 [R7/R8]
+ RETURNS: C++ EH Data registers (R3 - R6.) */
+
+rest_world_eh_r7r8:
+ bcl 20,31,Lr7r8$pb
+Lr7r8$pb: mflr r12
+ lwz r11,0(r1)
+ /* R11 := previous SP */
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12)
+ lwz r0,4(r11)
+ /* R0 := old CR */
+ lwz r12,0(r12)
+ /* R12 := HAS_VEC */
+ mtcr r0
+ cmpwi r12,0
+ lmw r13,-220(r11)
+ beq L.rest_world_fp_eh
+ /* restore VRsave and V20..V31 */
+ lwz r0,-224(r11)
+ li r12,-416
+ mtspr VRsave,r0
+ lvx v20,r11,r12
+ li r12,-400
+ lvx v21,r11,r12
+ li r12,-384
+ lvx v22,r11,r12
+ li r12,-368
+ lvx v23,r11,r12
+ li r12,-352
+ lvx v24,r11,r12
+ li r12,-336
+ lvx v25,r11,r12
+ li r12,-320
+ lvx v26,r11,r12
+ li r12,-304
+ lvx v27,r11,r12
+ li r12,-288
+ lvx v28,r11,r12
+ li r12,-272
+ lvx v29,r11,r12
+ li r12,-256
+ lvx v30,r11,r12
+ li r12,-240
+ lvx v31,r11,r12
+
+L.rest_world_fp_eh:
+ lfd f14,-144(r11)
+ lfd f15,-136(r11)
+ lfd f16,-128(r11)
+ lfd f17,-120(r11)
+ lfd f18,-112(r11)
+ lfd f19,-104(r11)
+ lfd f20,-96(r11)
+ lfd f21,-88(r11)
+ lfd f22,-80(r11)
+ lfd f23,-72(r11)
+ lfd f24,-64(r11)
+ lfd f25,-56(r11)
+ lfd f26,-48(r11)
+ lfd f27,-40(r11)
+ lfd f28,-32(r11)
+ lfd f29,-24(r11)
+ lfd f30,-16(r11)
+ /* R8 is the exception-handler's address */
+ mtctr r8
+ lfd f31,-8(r11)
+ /* set SP to original value + R7 offset */
+ add r1,r11,r7
+ bctr
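
The scattered constants in save_world/rest_world imply a single fixed
frame layout.  This enum is a hedged reconstruction from the offsets
used above (32-bit case, relative to the caller's SP; the names are
invented for illustration):

enum save_world_layout
{
  SW_EH_R3   = -432,  /* C++ EH data regs r3..r6 at -432..-420 */
  SW_V20     = -416,  /* v20..v31 at -416..-240, 16 bytes apart */
  SW_VRSAVE  = -224,  /* "VRsave lives at -224(R1)" */
  SW_R13     = -220,  /* stmw r13: r13..r31 at -220..-148 */
  SW_F14     = -144,  /* f14..f31 at -144..-8, 8 bytes apart */
  SW_CR_SLOT = 4,     /* CR stashed at 4(r1) */
  SW_LR_SLOT = 8      /* caller's caller's LR at 8(r1) */
};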
diff --git a/contrib/gcc/config/rs6000/darwin.h b/contrib/gcc/config/rs6000/darwin.h
index cae8bac..7bd53fc 100644
--- a/contrib/gcc/config/rs6000/darwin.h
+++ b/contrib/gcc/config/rs6000/darwin.h
@@ -1,5 +1,6 @@
/* Target definitions for PowerPC running Darwin (Mac OS X).
- Copyright (C) 1997, 2000, 2001, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by Apple Computer Inc.
This file is part of GCC.
@@ -16,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC)");
@@ -26,106 +27,180 @@
#define DEFAULT_ABI ABI_DARWIN
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __powerpc64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
/* The object file format is Mach-O. */
#define TARGET_OBJECT_FORMAT OBJECT_MACHO
+/* Size of the Obj-C jump buffer. */
+#define OBJC_JBLEN ((TARGET_64BIT) ? (26*2 + 18*2 + 129 + 1) : (26 + 18*2 + 129 + 1))
+
/* We're not ever going to do TOCs. */
#define TARGET_TOC 0
#define TARGET_NO_TOC 1
-/* Darwin switches. */
-/* Use dynamic-no-pic codegen (no picbase reg; not suitable for shlibs.) */
-#define MASK_MACHO_DYNAMIC_NO_PIC 0x00800000
-
-#define TARGET_DYNAMIC_NO_PIC (target_flags & MASK_MACHO_DYNAMIC_NO_PIC)
-
-/* Handle #pragma weak and #pragma pack. */
-#define HANDLE_SYSV_PRAGMA 1
+/* Override the default rs6000 definition. */
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+/* Translate config/rs6000/darwin.opt to config/darwin.h. */
+#define TARGET_DYNAMIC_NO_PIC (TARGET_MACHO_DYNAMIC_NO_PIC)
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
- builtin_define ("__ppc__"); \
+ if (!TARGET_64BIT) builtin_define ("__ppc__"); \
+ if (TARGET_64BIT) builtin_define ("__ppc64__"); \
builtin_define ("__POWERPC__"); \
builtin_define ("__NATURAL_ALIGNMENT__"); \
- builtin_define ("__MACH__"); \
- builtin_define ("__APPLE__"); \
+ darwin_cpp_builtins (pfile); \
} \
while (0)
-/* */
-#undef SUBTARGET_SWITCHES
-#define SUBTARGET_SWITCHES \
- {"dynamic-no-pic", MASK_MACHO_DYNAMIC_NO_PIC, \
- N_("Generate code suitable for executables (NOT shared libs)")}, \
- {"no-dynamic-no-pic", -MASK_MACHO_DYNAMIC_NO_PIC, ""},
-
-
-/* The Darwin ABI always includes AltiVec, can't be (validly) turned
- off. */
-
-#define SUBTARGET_OVERRIDE_OPTIONS \
+#define SUBTARGET_OVERRIDE_OPTIONS \
do { \
+ /* The Darwin ABI always includes AltiVec, can't be (validly) turned \
+ off. */ \
rs6000_altivec_abi = 1; \
- rs6000_altivec_vrsave = 1; \
+ TARGET_ALTIVEC_VRSAVE = 1; \
if (DEFAULT_ABI == ABI_DARWIN) \
{ \
if (MACHO_DYNAMIC_NO_PIC_P) \
{ \
if (flag_pic) \
- warning ("-mdynamic-no-pic overrides -fpic or -fPIC"); \
+ warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC"); \
flag_pic = 0; \
} \
else if (flag_pic == 1) \
{ \
- /* Darwin doesn't support -fpic. */ \
- warning ("-fpic is not supported; -fPIC assumed"); \
flag_pic = 2; \
} \
} \
-}while(0)
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-m64 requires PowerPC64 architecture, enabling"); \
+ } \
+ if (flag_mkernel) \
+ { \
+ rs6000_default_long_calls = 1; \
+ target_flags |= MASK_SOFT_FLOAT; \
+ } \
+ \
+ /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes \
+ Altivec. */ \
+ if (!flag_mkernel && !flag_apple_kext \
+ && TARGET_64BIT \
+ && ! (target_flags_explicit & MASK_ALTIVEC)) \
+ target_flags |= MASK_ALTIVEC; \
+ \
+  /* Unless the user (not the configurer) has explicitly overridden  \
+     it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to      \
+     G4 unless targeting the kernel.  */                             \
+ if (!flag_mkernel \
+ && !flag_apple_kext \
+ && darwin_macosx_version_min \
+ && strverscmp (darwin_macosx_version_min, "10.5") >= 0 \
+ && ! (target_flags_explicit & MASK_ALTIVEC) \
+ && ! rs6000_select[1].string) \
+ { \
+ target_flags |= MASK_ALTIVEC; \
+ } \
+} while(0)
+
+#define C_COMMON_OVERRIDE_OPTIONS do { \
+ /* On powerpc, __cxa_get_exception_ptr is available starting in the \
+ 10.4.6 libstdc++.dylib. */ \
+ if ((! darwin_macosx_version_min \
+ || strverscmp (darwin_macosx_version_min, "10.4.6") < 0) \
+ && flag_use_cxa_get_exception_ptr == 2) \
+ flag_use_cxa_get_exception_ptr = 0; \
+ if (flag_mkernel) \
+ flag_no_builtin = 1; \
+ SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \
+} while (0)
+
+/* Darwin has 128-bit long double support in libc in 10.4 and later.
+ Default to 128-bit long doubles even on earlier platforms for ABI
+ consistency; arithmetic will work even if libc and libm support is
+ not available. */
+
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
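
The effect of the 128-bit default (the IBM double-double format on
Darwin/PPC) is directly observable; a quick check, noting that the
standard rs6000 -mlong-double-64 switch selects the old size:

#include <stdio.h>

int main (void)
{
  /* With RS6000_DEFAULT_LONG_DOUBLE_SIZE == 128 this prints 16;
     under -mlong-double-64 it would print 8.  */
  printf ("sizeof (long double) = %zu\n", sizeof (long double));
  return 0;
}
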
+
/* We want -fPIC by default, unless we're using -static to compile for
the kernel or some such. */
-
#define CC1_SPEC "\
-%{gused: -feliminate-unused-debug-symbols %<gused }\
-%{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
-%{!static:%{!mdynamic-no-pic:-fPIC}}"
-
-/* It's virtually impossible to predict all the possible combinations
- of -mcpu and -maltivec and whatnot, so just supply
- -force_cpusubtype_ALL if any are seen. Radar 3492132 against the
- assembler is asking for a .machine directive so we could get this
- really right. */
-#define ASM_SPEC "-arch ppc \
- %{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
- %{!Zforce_cpusubtype_ALL:%{maltivec|mcpu=*|mpowerpc64:-force_cpusubtype_ALL}}"
+ %{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }} \
+ %{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
+ %{!mkernel:%{!static:%{!mdynamic-no-pic:-fPIC}}}"
+
+#define DARWIN_ARCH_SPEC "%{m64:ppc64;:ppc}"
+
+#define DARWIN_SUBARCH_SPEC " \
+ %{m64: ppc64} \
+ %{!m64: \
+ %{mcpu=601:ppc601; \
+ mcpu=603:ppc603; \
+ mcpu=603e:ppc603; \
+ mcpu=604:ppc604; \
+ mcpu=604e:ppc604e; \
+ mcpu=740:ppc750; \
+ mcpu=750:ppc750; \
+ mcpu=G3:ppc750; \
+ mcpu=7400:ppc7400; \
+ mcpu=G4:ppc7400; \
+ mcpu=7450:ppc7450; \
+ mcpu=970:ppc970; \
+ mcpu=power4:ppc970; \
+ mcpu=G5:ppc970; \
+ :ppc}}"
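
The subarch spec is a straight -mcpu lookup; restated as data for
readability (the helper type is illustrative only, the spec string
above is authoritative):

/* Illustrative restatement of DARWIN_SUBARCH_SPEC's -mcpu mapping.  */
struct cpu_subarch { const char *cpu, *subarch; };

static const struct cpu_subarch subarch_map[] = {
  { "601",  "ppc601"  }, { "603",    "ppc603"  }, { "603e", "ppc603"  },
  { "604",  "ppc604"  }, { "604e",   "ppc604e" },
  { "740",  "ppc750"  }, { "750",    "ppc750"  }, { "G3",   "ppc750"  },
  { "7400", "ppc7400" }, { "G4",     "ppc7400" }, { "7450", "ppc7450" },
  { "970",  "ppc970"  }, { "power4", "ppc970"  }, { "G5",   "ppc970"  },
  { 0,      "ppc"     }   /* default when no -mcpu is given; -m64 is ppc64 */
};
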
+
+/* crt2.o is at least partially required for 10.3.x and earlier. */
+#define DARWIN_CRT2_SPEC \
+ "%{!m64:%:version-compare(!> 10.4 mmacosx-version-min= crt2.o%s)}"
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
- { "darwin_arch", "ppc" },
-
-/* The "-faltivec" option should have been called "-maltivec" all along. */
-#define SUBTARGET_OPTION_TRANSLATE_TABLE \
- { "-faltivec", "-maltivec -include altivec.h" }, \
- { "-fno-altivec", "-mno-altivec" }, \
- { "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
+ { "darwin_arch", DARWIN_ARCH_SPEC }, \
+ { "darwin_crt2", DARWIN_CRT2_SPEC }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC },
+
+/* Output a .machine directive. */
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rs6000_darwin_file_start
+
+/* The "-faltivec" option should have been called "-maltivec" all
+ along. -ffix-and-continue and -findirect-data is for compatibility
+ for old compilers. */
+
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "-ffix-and-continue", "-mfix-and-continue" }, \
+ { "-findirect-data", "-mfix-and-continue" }, \
+ { "-faltivec", "-maltivec -include altivec.h" }, \
+ { "-fno-altivec", "-mno-altivec" }, \
+ { "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
{ "-Wno-altivec-long-deprecated", "-mno-warn-altivec-long" }
-/* Make both r2 and r3 available for allocation. */
+/* Make both r2 and r13 available for allocation. */
#define FIXED_R2 0
#define FIXED_R13 0
/* Base register for access to local variables of the function. */
-#undef FRAME_POINTER_REGNUM
-#define FRAME_POINTER_REGNUM 30
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 30
#undef RS6000_PIC_OFFSET_TABLE_REGNUM
#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
@@ -134,9 +209,10 @@ do { \
#undef STARTING_FRAME_OFFSET
#define STARTING_FRAME_OFFSET \
- (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
- + RS6000_VARARGS_AREA \
- + RS6000_SAVE_AREA)
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
+ + RS6000_SAVE_AREA))
#undef STACK_DYNAMIC_OFFSET
#define STACK_DYNAMIC_OFFSET(FUNDECL) \
@@ -146,7 +222,7 @@ do { \
/* These are used by -fbranch-probabilities */
#define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions"
#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
- "__TEXT,__text2,regular,pure_instructions"
+ "__TEXT,__unlikely,regular,pure_instructions"
/* Define cutoff for using external functions to save floating point.
Currently on Darwin, always use inline stores. */
@@ -154,6 +230,10 @@ do { \
#undef FP_SAVE_INLINE
#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+/* Darwin uses a function call if everything needs to be saved/restored. */
+#undef WORLD_SAVE_P
+#define WORLD_SAVE_P(INFO) ((INFO)->world_save_p)
+
/* The assembler wants the alternate register names, but without
leading percent sign. */
#undef REGISTER_NAMES
@@ -175,7 +255,8 @@ do { \
"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
"vrsave", "vscr", \
- "spe_acc", "spefscr" \
+ "spe_acc", "spefscr", \
+ "sfp" \
}
/* This outputs NAME to FILE. */
@@ -199,13 +280,14 @@ do { \
/* This says how to output an assembler line to define a global common
symbol. */
-/* ? */
-#undef ASM_OUTPUT_ALIGNED_COMMON
-#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
- do { fputs (".comm ", (FILE)); \
- RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
- fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
- (SIZE)); } while (0)
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ fputs (".comm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", _new_size); \
+ } while (0)
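
The functional change here is the zero-size bump: some assemblers
reject a .comm directive with size 0, so a zero-sized common symbol now
gets one byte. A standalone sketch of the emission (the leading
underscore stands in for Darwin's user-label prefix, which
RS6000_OUTPUT_BASENAME applies in the real macro):

#include <stdio.h>

/* Sketch of ASM_OUTPUT_COMMON above: a zero-sized common symbol is
   bumped to one byte so the assembler always sees a valid size.  */
static void
emit_comm (FILE *f, const char *name, unsigned long size)
{
  if (size == 0)
    size = 1;
  fprintf (f, ".comm _%s,%lu\n", name, size);
}

int main (void) { emit_comm (stdout, "c", 0); return 0; }  /* .comm _c,1 */
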
/* Override the standard rs6000 definition. */
@@ -233,6 +315,19 @@ do { \
fprintf (FILE, "\t.align32 %d,0x60000000\n", (LOG)); \
} while (0)
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* This is supported in cctools 465 and later. The macro test
+ above prevents using it in earlier build environments. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
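
For example, ASM_OUTPUT_MAX_SKIP_ALIGN (file, 4, 7) emits

	.p2align 4,,7

that is, align to a 16-byte boundary but insert at most 7 bytes of
padding, while a MAX_SKIP of 0 degenerates to a plain .p2align 4.
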
+
/* Generate insns to call the profiler. */
#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
@@ -241,10 +336,12 @@ do { \
#define RS6000_MCOUNT "*mcount"
-/* Default processor: a G4. */
+/* Default processor: G4, and G5 for 64-bit. */
#undef PROCESSOR_DEFAULT
#define PROCESSOR_DEFAULT PROCESSOR_PPC7400
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
/* Default target flag settings. Despite the fact that STMW/LMW
serializes, it's still a big code size win to use them. Use FSEL by
@@ -254,9 +351,15 @@ do { \
#define TARGET_DEFAULT (MASK_POWERPC | MASK_MULTIPLE | MASK_NEW_MNEMONICS \
| MASK_PPC_GFXOPT)
+/* Darwin only runs on PowerPC, so short-circuit POWER patterns. */
+#undef TARGET_POWER
+#define TARGET_POWER 0
+#undef TARGET_IEEEQUAD
+#define TARGET_IEEEQUAD 0
+
/* Since Darwin doesn't do TOCs, stub this out. */
-#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) 0
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) ((void)X, (void)MODE, 0)
/* Unlike most other PowerPC targets, chars are signed, for
consistency with other Darwin architectures. */
@@ -264,11 +367,11 @@ do { \
#undef DEFAULT_SIGNED_CHAR
#define DEFAULT_SIGNED_CHAR (1)
-/* Given an rtx X being reloaded into a reg required to be
- in class CLASS, return the class of reg to actually use.
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class.
-
+
On the RS/6000, we have to return NO_REGS when we want to reload a
floating-point CONST_DOUBLE to force it to be copied to memory.
@@ -277,8 +380,8 @@ do { \
#undef PREFERRED_RELOAD_CLASS
#define PREFERRED_RELOAD_CLASS(X,CLASS) \
- ((GET_CODE (X) == CONST_DOUBLE \
- && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) \
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
? NO_REGS \
: ((GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == HIGH) \
&& reg_class_subset_p (BASE_REGS, (CLASS))) \
@@ -308,23 +411,64 @@ do { \
|| TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
&& TARGET_ALIGN_NATURAL == 0 \
? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
- : (TARGET_ALTIVEC && TREE_CODE (STRUCT) == VECTOR_TYPE) \
+ : (TREE_CODE (STRUCT) == VECTOR_TYPE \
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (STRUCT))) \
? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
: MAX ((COMPUTED), (SPECIFIED)))
+/* Specify padding for the last element of a block move between
+ registers and memory. FIRST is nonzero if this is the only
+ element. */
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE))
+
/* XXX: Darwin supports neither .quad nor .llong, but it also doesn't
   support 64-bit PowerPC, so this just keeps things happy.  */
#define DOUBLE_INT_ASM_OP "\t.quad\t"
-/* Get HOST_WIDE_INT and CONST_INT to be 32 bits, for compile time
- space/speed. */
-#undef MAX_LONG_TYPE_SIZE
-#define MAX_LONG_TYPE_SIZE 32
-
/* For binary compatibility with 2.95; Darwin C APIs use bool from
- stdbool.h, which was an int-sized enum in 2.95. */
-#define BOOL_TYPE_SIZE INT_TYPE_SIZE
+ stdbool.h, which was an int-sized enum in 2.95. Users can explicitly
+ choose to have sizeof(bool)==1 with the -mone-byte-bool switch. */
+#define BOOL_TYPE_SIZE (darwin_one_byte_bool ? CHAR_TYPE_SIZE : INT_TYPE_SIZE)
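
The bool-size choice is easy to observe; a minimal check (compiled
with a Darwin/PowerPC gcc; -mone-byte-bool is the switch named in the
comment above):

#include <stdbool.h>
#include <stdio.h>

int main (void)
{
  /* Prints 4 by default (2.95 binary compatibility), 1 under
     -mone-byte-bool.  */
  printf ("sizeof (bool) = %zu\n", sizeof (bool));
  return 0;
}
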
#undef REGISTER_TARGET_PRAGMAS
-#define REGISTER_TARGET_PRAGMAS DARWIN_REGISTER_TARGET_PRAGMAS
+#define REGISTER_TARGET_PRAGMAS() \
+ do \
+ { \
+ DARWIN_REGISTER_TARGET_PRAGMAS(); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
+ } \
+ while (0)
+
+#ifdef IN_LIBGCC2
+#include <stdbool.h>
+#endif
+
+#define MD_UNWIND_SUPPORT "config/rs6000/darwin-unwind.h"
+
+#define HAS_MD_FALLBACK_FRAME_STATE_FOR 1
+
+/* True iff we're generating fast-turnaround debugging code.  When
+ true, we arrange for function prologues to start with 5 nops so
+ that gdb may insert code to redirect them, and for data to be
+ accessed indirectly. The runtime uses this indirection to forward
+ references for data to the original instance of that data. */
+
+#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue)
+
+/* This is the reserved direct dispatch address for Objective-C. */
+#define OFFS_MSGSEND_FAST 0xFFFEFF00
+
+/* This is the reserved ivar address for Objective-C.  */
+#define OFFS_ASSIGNIVAR_FAST 0xFFFEFEC0
+
+/* Old versions of Mac OS/Darwin don't have C99 functions available. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS \
+ (TARGET_64BIT \
+ || (darwin_macosx_version_min \
+ && strverscmp (darwin_macosx_version_min, "10.3") >= 0))
+/* When generating kernel code or kexts, we don't use Altivec by
+ default, as kernel code doesn't save/restore those registers. */
+#define OS_MISSING_ALTIVEC (flag_mkernel || flag_apple_kext)
diff --git a/contrib/gcc/config/rs6000/darwin.md b/contrib/gcc/config/rs6000/darwin.md
new file mode 100644
index 0000000..c8e3287
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin.md
@@ -0,0 +1,440 @@
+/* Machine description patterns for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+(define_insn "adddi3_high"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (high:DI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+(define_insn "movdf_low_si"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ {
+ if (TARGET_POWERPC64 && TARGET_32BIT)
+ /* Note, old assemblers didn't support relocation here. */
+ return \"ld %0,lo16(%2)(%1)\";
+ else
+ {
+ output_asm_insn (\"{cal|la} %0,lo16(%2)(%1)\", operands);
+ output_asm_insn (\"{l|lwz} %L0,4(%0)\", operands);
+ return (\"{l|lwz} %0,0(%0)\");
+ }
+ }
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,12")])
+
+
+(define_insn "movdf_low_di"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ return \"ld %0,lo16(%2)(%1)\";
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,4")])
+
+(define_insn "movdf_low_st_si"
+ [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdf_low_st_di"
+ [(set (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_si"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_di"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_si"
+ [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_di"
+ [(set (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; 64-bit MachO load/store support
+(define_insn "movdi_low"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{l|ld} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_low_st"
+ [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:SI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdi_low_st"
+ [(set (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{st|std} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; Mach-O PIC trickery.
+(define_expand "macho_high"
+ [(set (match_operand 0 "" "")
+ (high (match_operand 1 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_high_di (operands[0], operands[1]));
+ else
+ emit_insn (gen_macho_high_si (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "macho_high_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+
+(define_insn "macho_high_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b*r")
+ (high:DI (match_operand 1 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+(define_expand "macho_low"
+ [(set (match_operand 0 "" "")
+ (lo_sum (match_operand 1 "" "")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_low_di (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_macho_low_si (operands[0], operands[1], operands[2]));
+
+ DONE;
+})
+
+(define_insn "macho_low_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
+
+(define_insn "macho_low_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
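
The ha16/lo16 pairing used throughout these patterns splits a 32-bit
address so that the low half, a signed 16-bit displacement, has its
sign extension compensated in the high half; a sketch of the arithmetic
(helper names are illustrative, this is the standard Mach-O hi16/lo16
relocation math):

/* ha16/lo16 arithmetic behind the patterns above: lo16 is a signed
   16-bit displacement, so ha16 pre-adjusts the high half to cancel
   the sign extension.  Helper names are illustrative.  */
static unsigned int ha16 (unsigned int a) { return (a + 0x8000) >> 16; }
static int lo16 (unsigned int a) { return (short) (a & 0xffff); }

/* Invariant: ((ha16 (a) << 16) + lo16 (a)) == a (mod 2^32),
   e.g. a = 0x12348000: ha16 = 0x1235, lo16 = -0x8000.  */
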
+
+(define_split
+ [(set (mem:V4SI (plus:DI (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "short_cint_operand" "")))
+ (match_operand:V4SI 2 "register_operand" ""))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" ""))]
+ "TARGET_MACHO && TARGET_64BIT"
+ [(set (match_dup 3) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (mem:V4SI (match_dup 3))
+ (match_dup 2))]
+ "")
+
+(define_expand "load_macho_picbase"
+ [(set (match_operand 0 "" "")
+ (unspec [(match_operand 1 "" "")]
+ UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_load_macho_picbase_si (operands[0], operands[1]));
+ else
+ emit_insn (gen_load_macho_picbase_di (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "load_macho_picbase_si"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+ "bcl 20,31,%1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_macho_picbase_di"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (unspec:DI [(match_operand:DI 1 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic && TARGET_64BIT"
+ "bcl 20,31,%1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_expand "macho_correct_pic"
+ [(set (match_operand 0 "" "")
+ (plus (match_operand 1 "" "")
+ (unspec [(match_operand 2 "" "")
+ (match_operand 3 "" "")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_macho_correct_pic_si (operands[0], operands[1], operands[2],
+ operands[3]));
+ else
+ emit_insn (gen_macho_correct_pic_di (operands[0], operands[1], operands[2],
+ operands[3]));
+
+ DONE;
+})
+
+(define_insn "macho_correct_pic_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "macho_correct_pic_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "immediate_operand" "s")
+ (match_operand:DI 3 "immediate_operand" "s")]
+ 16)))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "*call_indirect_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+{
+ return "b%T0l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 0, 2);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_indirect_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ return "b%T1l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 1, 3);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+ return "b %z0";
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ return \"b %z1\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*sibcall_symbolic_64"
+ [(call (mem:SI (match_operand:DI 0 "call_operand" "s,c")) ; 64
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic_64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
diff --git a/contrib/gcc/config/rs6000/darwin.opt b/contrib/gcc/config/rs6000/darwin.opt
new file mode 100644
index 0000000..99b38ec
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin.opt
@@ -0,0 +1,33 @@
+; Darwin options for PPC port.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+m64
+Target RejectNegative Mask(64BIT)
+Generate 64-bit code
+
+m32
+Target RejectNegative InverseMask(64BIT)
+Generate 32-bit code
+
+mdynamic-no-pic
+Target Report Mask(MACHO_DYNAMIC_NO_PIC)
+Generate code suitable for executables (NOT shared libs)
diff --git a/contrib/gcc/config/rs6000/darwin64.h b/contrib/gcc/config/rs6000/darwin64.h
new file mode 100644
index 0000000..80e802d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin64.h
@@ -0,0 +1,36 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC64)");
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT \
+ | MASK_MULTIPLE | MASK_NEW_MNEMONICS | MASK_PPC_GFXOPT)
+
+#undef DARWIN_ARCH_SPEC
+#define DARWIN_ARCH_SPEC "ppc64"
+
+#undef DARWIN_SUBARCH_SPEC
+#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC
+
+#undef DARWIN_CRT2_SPEC
+#define DARWIN_CRT2_SPEC ""
diff --git a/contrib/gcc/config/rs6000/darwin7.h b/contrib/gcc/config/rs6000/darwin7.h
new file mode 100644
index 0000000..4c1cda3
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin7.h
@@ -0,0 +1,31 @@
+/* Target definitions for Darwin 7.x (Mac OS X) systems.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Machine dependent libraries.  Include libmx when compiling for
+   Darwin 7.0 and above, and link it before libSystem: the functions
+   actually live in libSystem, but for 7.x compatibility we want them
+   looked up in libmx first.  Include libmx by default because
+   otherwise libstdc++ isn't usable.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %:version-compare(!< 10.3 mmacosx-version-min= -lmx)\
+ -lSystem}"
diff --git a/contrib/gcc/config/rs6000/darwin8.h b/contrib/gcc/config/rs6000/darwin8.h
new file mode 100644
index 0000000..ee583a2
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin8.h
@@ -0,0 +1,33 @@
+/* Target definitions for Darwin 8.0 and above (Mac OS X) systems.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Machine dependent libraries.  Include libmx when compiling on
+   Darwin 7.0 and above, and link it before libSystem: the functions
+   actually live in libSystem, but for 7.x compatibility we want them
+   looked up in libmx first.  Only do this when 7.x compatibility is
+   a concern, which it's not in 64-bit mode.  Include libSystemStubs
+   when compiling on (not necessarily for) 8.0 and above, unless
+   64-bit long doubles are in use.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %{!mlong-double-64:%{pg:-lSystemStubs_profile;:-lSystemStubs}} \
+ %{!m64:%:version-compare(>< 10.3 10.4 mmacosx-version-min= -lmx)} -lSystem}"
diff --git a/contrib/gcc/config/rs6000/default64.h b/contrib/gcc/config/rs6000/default64.h
index c6ed142..10fe09c 100644
--- a/contrib/gcc/config/rs6000/default64.h
+++ b/contrib/gcc/config/rs6000/default64.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for 64 bit powerpc linux defaulting to -m64.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2005 Free Software Foundation, Inc.
This file is part of GCC.
@@ -16,9 +16,10 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
- (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT | MASK_NEW_MNEMONICS)
+ (MASK_POWERPC | MASK_PPC_GFXOPT | \
+ MASK_POWERPC64 | MASK_64BIT | MASK_NEW_MNEMONICS)
diff --git a/contrib/gcc/config/rs6000/e500-double.h b/contrib/gcc/config/rs6000/e500-double.h
new file mode 100644
index 0000000..55587e4
--- /dev/null
+++ b/contrib/gcc/config/rs6000/e500-double.h
@@ -0,0 +1,25 @@
+/* Target definitions for E500 with double precision FP.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#undef SUB3TARGET_OVERRIDE_OPTIONS
+#define SUB3TARGET_OVERRIDE_OPTIONS \
+ if (!rs6000_explicit_options.float_gprs) \
+ rs6000_float_gprs = 2;
diff --git a/contrib/gcc/config/rs6000/eabi-ci.asm b/contrib/gcc/config/rs6000/eabi-ci.asm
index 447b4e2..d32120f 100644
--- a/contrib/gcc/config/rs6000/eabi-ci.asm
+++ b/contrib/gcc/config/rs6000/eabi-ci.asm
@@ -22,8 +22,8 @@ General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/eabi-cn.asm b/contrib/gcc/config/rs6000/eabi-cn.asm
index b2c6095..90a5da7 100644
--- a/contrib/gcc/config/rs6000/eabi-cn.asm
+++ b/contrib/gcc/config/rs6000/eabi-cn.asm
@@ -22,8 +22,8 @@ General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/eabi.asm b/contrib/gcc/config/rs6000/eabi.asm
index c7876bc..b5d4505 100644
--- a/contrib/gcc/config/rs6000/eabi.asm
+++ b/contrib/gcc/config/rs6000/eabi.asm
@@ -24,8 +24,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
*
* As a special exception, if you link this library with files
* compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/eabi.h b/contrib/gcc/config/rs6000/eabi.h
index ff8df2c..323e5ad 100644
--- a/contrib/gcc/config/rs6000/eabi.h
+++ b/contrib/gcc/config/rs6000/eabi.h
@@ -1,6 +1,6 @@
/* Core target definitions for GNU compiler
for IBM RS/6000 PowerPC targeted to embedded ELF systems.
- Copyright (C) 1995, 1996, 2000, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 2000, 2003, 2004 Free Software Foundation, Inc.
Contributed by Cygnus Support.
This file is part of GCC.
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Add -meabi to target flags. */
#undef TARGET_DEFAULT
@@ -49,9 +49,13 @@
#undef TARGET_E500
#undef TARGET_ISEL
#undef TARGET_FPRS
+#undef TARGET_E500_SINGLE
+#undef TARGET_E500_DOUBLE
#define TARGET_SPE_ABI rs6000_spe_abi
#define TARGET_SPE rs6000_spe
#define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540)
#define TARGET_ISEL rs6000_isel
-#define TARGET_FPRS (!rs6000_float_gprs)
+#define TARGET_FPRS (rs6000_float_gprs == 0)
+#define TARGET_E500_SINGLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 1)
+#define TARGET_E500_DOUBLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 2)
diff --git a/contrib/gcc/config/rs6000/eabialtivec.h b/contrib/gcc/config/rs6000/eabialtivec.h
index e407e3b..437c0c8 100644
--- a/contrib/gcc/config/rs6000/eabialtivec.h
+++ b/contrib/gcc/config/rs6000/eabialtivec.h
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Add -meabi and -maltivec to target flags. */
#undef TARGET_DEFAULT
diff --git a/contrib/gcc/config/rs6000/eabisim.h b/contrib/gcc/config/rs6000/eabisim.h
index 5e0900d..171c791 100644
--- a/contrib/gcc/config/rs6000/eabisim.h
+++ b/contrib/gcc/config/rs6000/eabisim.h
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC Simulated)");
diff --git a/contrib/gcc/config/rs6000/eabispe.h b/contrib/gcc/config/rs6000/eabispe.h
index 1551dc1..2a0b923 100644
--- a/contrib/gcc/config/rs6000/eabispe.h
+++ b/contrib/gcc/config/rs6000/eabispe.h
@@ -1,6 +1,6 @@
/* Core target definitions for GNU compiler
for PowerPC embedded targeted systems with SPE support.
- Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Aldy Hernandez (aldyh@redhat.com).
This file is part of GCC.
@@ -17,11 +17,12 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI)
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI \
+ | MASK_STRICT_ALIGN)
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded SPE)");
@@ -30,17 +31,19 @@
#define SUBSUBTARGET_OVERRIDE_OPTIONS \
if (rs6000_select[1].string == NULL) \
rs6000_cpu = PROCESSOR_PPC8540; \
- if (rs6000_abi_string == NULL) \
+ if (!rs6000_explicit_options.abi) \
rs6000_spe_abi = 1; \
- if (rs6000_float_gprs_string == NULL) \
+ if (!rs6000_explicit_options.float_gprs) \
rs6000_float_gprs = 1; \
/* See note below. */ \
- /*if (rs6000_long_double_size_string == NULL)*/ \
+ /*if (!rs6000_explicit_options.long_double)*/ \
/* rs6000_long_double_type_size = 128;*/ \
- if (rs6000_spe_string == NULL) \
+ if (!rs6000_explicit_options.spe) \
rs6000_spe = 1; \
- if (rs6000_isel_string == NULL) \
- rs6000_isel = 1
+ if (!rs6000_explicit_options.isel) \
+ rs6000_isel = 1; \
+ if (target_flags & MASK_64BIT) \
+ error ("-m64 not supported in this configuration")
/* The e500 ABI says that either long doubles are 128 bits, or if
implemented in any other size, the compiler/linker should error out.
diff --git a/contrib/gcc/config/rs6000/freebsd.h b/contrib/gcc/config/rs6000/freebsd.h
index fe6a801..edd267d 100644
--- a/contrib/gcc/config/rs6000/freebsd.h
+++ b/contrib/gcc/config/rs6000/freebsd.h
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Override the defaults, which exist to force the proper definition. */
diff --git a/contrib/gcc/config/rs6000/gnu.h b/contrib/gcc/config/rs6000/gnu.h
index 658a6d6..b7ddbfb 100644
--- a/contrib/gcc/config/rs6000/gnu.h
+++ b/contrib/gcc/config/rs6000/gnu.h
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef CPP_OS_DEFAULT_SPEC
#define CPP_OS_DEFAULT_SPEC "%(cpp_os_gnu)"
diff --git a/contrib/gcc/config/rs6000/host-darwin.c b/contrib/gcc/config/rs6000/host-darwin.c
index d04270c..be0c55f 100644
--- a/contrib/gcc/config/rs6000/host-darwin.c
+++ b/contrib/gcc/config/rs6000/host-darwin.c
@@ -1,5 +1,5 @@
/* Darwin/powerpc host-specific hook definitions.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
@@ -15,27 +15,37 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include <signal.h>
#include <sys/ucontext.h>
-#include <sys/mman.h>
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "toplev.h"
#include "diagnostic.h"
+#include "config/host-darwin.h"
static void segv_crash_handler (int);
static void segv_handler (int, siginfo_t *, void *);
static void darwin_rs6000_extra_signals (void);
+#ifndef HAVE_DECL_SIGALTSTACK
/* This doesn't have a prototype in signal.h in 10.2.x and earlier,
fixed in later releases. */
extern int sigaltstack(const struct sigaltstack *, struct sigaltstack *);
+#endif
+
+/* The fields of the mcontext_t type have acquired underscores in later
+ OS versions. */
+#ifdef HAS_MCONTEXT_T_UNDERSCORES
+#define MC_FLD(x) __ ## x
+#else
+#define MC_FLD(x) x
+#endif
#undef HOST_HOOKS_EXTRA_SIGNALS
#define HOST_HOOKS_EXTRA_SIGNALS darwin_rs6000_extra_signals
@@ -58,13 +68,17 @@ segv_handler (int sig ATTRIBUTE_UNUSED,
void *scp)
{
ucontext_t *uc = (ucontext_t *)scp;
+ sigset_t sigset;
unsigned faulting_insn;
/* The fault might have happened when trying to run some instruction, in
which case the next line will segfault _again_. Handle this case. */
signal (SIGSEGV, segv_crash_handler);
+ sigemptyset (&sigset);
+ sigaddset (&sigset, SIGSEGV);
+ sigprocmask (SIG_UNBLOCK, &sigset, NULL);
- faulting_insn = *(unsigned *)uc->uc_mcontext->ss.srr0;
+ faulting_insn = *(unsigned *)uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0);
/* Note that this only has to work for GCC, so we don't have to deal
with all the possible cases (GCC has no AltiVec code, for
@@ -73,7 +87,7 @@ segv_handler (int sig ATTRIBUTE_UNUSED,
this. */
if ((faulting_insn & 0xFFFF8000) == 0x94218000 /* stwu %r1, -xxx(%r1) */
- || (faulting_insn & 0xFFFF03FF) == 0x7C21016E /* stwux %r1, xxx, %r1 */
+ || (faulting_insn & 0xFC1F03FF) == 0x7C01016E /* stwux xxx, %r1, xxx */
|| (faulting_insn & 0xFC1F8000) == 0x90018000 /* stw xxx, -yyy(%r1) */
|| (faulting_insn & 0xFC1F8000) == 0xD8018000 /* stfd xxx, -yyy(%r1) */
|| (faulting_insn & 0xFC1F8000) == 0xBC018000 /* stmw xxx, -yyy(%r1) */)
@@ -101,19 +115,20 @@ segv_handler (int sig ATTRIBUTE_UNUSED,
if (strcmp (shell_commands[i][0], shell_name + 1) == 0)
{
fnotice (stderr,
- "Try running `%s' in the shell to raise its limit.\n",
+ "Try running '%s' in the shell to raise its limit.\n",
shell_commands[i][1]);
}
}
if (global_dc->abort_on_error)
- abort ();
+ fancy_abort (__FILE__, __LINE__, __FUNCTION__);
exit (FATAL_EXIT_CODE);
}
fprintf (stderr, "[address=%08lx pc=%08x]\n",
- uc->uc_mcontext->es.dar, uc->uc_mcontext->ss.srr0);
+ uc->uc_mcontext->MC_FLD(es).MC_FLD(dar),
+ uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0));
internal_error ("Segmentation Fault");
exit (FATAL_EXIT_CODE);
}
@@ -137,65 +152,5 @@ darwin_rs6000_extra_signals (void)
fatal_error ("While setting up signal handler: %m");
}
-#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
-#define HOST_HOOKS_GT_PCH_GET_ADDRESS darwin_rs6000_gt_pch_get_address
-#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
-#define HOST_HOOKS_GT_PCH_USE_ADDRESS darwin_rs6000_gt_pch_use_address
-
-/* Yes, this is really supposed to work. */
-static char pch_address_space[1024*1024*1024] __attribute__((aligned (4096)));
-
-/* Return the address of the PCH address space, if the PCH will fit in it. */
-
-static void *
-darwin_rs6000_gt_pch_get_address (size_t sz, int fd ATTRIBUTE_UNUSED)
-{
- if (sz <= sizeof (pch_address_space))
- return pch_address_space;
- else
- return NULL;
-}
-
-/* Check ADDR and SZ for validity, and deallocate (using munmap) that part of
- pch_address_space beyond SZ. */
-
-static int
-darwin_rs6000_gt_pch_use_address (void *addr, size_t sz, int fd, size_t off)
-{
- const size_t pagesize = getpagesize();
- void *mmap_result;
- int ret;
-
- if ((size_t)pch_address_space % pagesize != 0
- || sizeof (pch_address_space) % pagesize != 0)
- abort ();
-
- ret = (addr == pch_address_space && sz <= sizeof (pch_address_space));
- if (! ret)
- sz = 0;
-
- /* Round the size to a whole page size. Normally this is a no-op. */
- sz = (sz + pagesize - 1) / pagesize * pagesize;
-
- if (munmap (pch_address_space + sz, sizeof (pch_address_space) - sz) != 0)
- fatal_error ("couldn't unmap pch_address_space: %m\n");
-
- if (ret)
- {
- mmap_result = mmap (addr, sz,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
- fd, off);
-
- /* The file might not be mmap-able. */
- ret = mmap_result != (void *) MAP_FAILED;
-
- /* Sanity check for broken MAP_FIXED. */
- if (ret && mmap_result != addr)
- abort ();
- }
-
- return ret;
-}
-
const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/contrib/gcc/config/rs6000/host-ppc64-darwin.c b/contrib/gcc/config/rs6000/host-ppc64-darwin.c
new file mode 100644
index 0000000..ec7f9b3
--- /dev/null
+++ b/contrib/gcc/config/rs6000/host-ppc64-darwin.c
@@ -0,0 +1,31 @@
+/* ppc64-darwin host-specific hook definitions.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "config/host-darwin.h"
+
+/* Darwin doesn't do anything special for ppc64 hosts; this file exists just
+ to include config/host-darwin.h. */
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/contrib/gcc/config/rs6000/kaos-ppc.h b/contrib/gcc/config/rs6000/kaos-ppc.h
index d6b92e7..eb18541 100644
--- a/contrib/gcc/config/rs6000/kaos-ppc.h
+++ b/contrib/gcc/config/rs6000/kaos-ppc.h
@@ -16,8 +16,8 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fputs (" (PowerPC/kaOS[ELF])", stderr);
diff --git a/contrib/gcc/config/rs6000/libgcc-ppc-glibc.ver b/contrib/gcc/config/rs6000/libgcc-ppc-glibc.ver
new file mode 100644
index 0000000..cf023dd
--- /dev/null
+++ b/contrib/gcc/config/rs6000/libgcc-ppc-glibc.ver
@@ -0,0 +1,52 @@
+%ifndef _SOFT_FLOAT
+%ifndef __powerpc64__
+%exclude {
+ __multc3
+ __divtc3
+ __powitf2
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+}
+
+GCC_4.1.0 {
+ # long double support
+ __multc3
+ __divtc3
+ __powitf2
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+
+%else
+GCC_3.4.4 {
+%endif
+%else
+GCC_4.2.0 {
+%endif
+
+ # long double support
+ __gcc_qadd
+ __gcc_qsub
+ __gcc_qmul
+ __gcc_qdiv
+
+%ifdef _SOFT_FLOAT
+ __gcc_qneg
+ __gcc_qeq
+ __gcc_qne
+ __gcc_qgt
+ __gcc_qge
+ __gcc_qlt
+ __gcc_qle
+ __gcc_qunord
+ __gcc_stoq
+ __gcc_dtoq
+ __gcc_qtos
+ __gcc_qtod
+ __gcc_qtoi
+ __gcc_qtou
+ __gcc_itoq
+ __gcc_utoq
+%endif
+}
diff --git a/contrib/gcc/config/rs6000/linux-unwind.h b/contrib/gcc/config/rs6000/linux-unwind.h
index 842fd10..6a59a54 100644
--- a/contrib/gcc/config/rs6000/linux-unwind.h
+++ b/contrib/gcc/config/rs6000/linux-unwind.h
@@ -1,5 +1,5 @@
/* DWARF2 EH unwinding support for PowerPC and PowerPC64 Linux.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
@@ -23,8 +23,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* This file defines our own versions of various kernel and user
structs, so that system headers are not needed, which otherwise
@@ -89,234 +89,260 @@ struct gcc_ucontext
enum { SIGNAL_FRAMESIZE = 128 };
-/* If the current unwind info (FS) does not contain explicit info
- saving R2, then we have to do a minor amount of code reading to
- figure out if it was saved. The big problem here is that the
- code that does the save/restore is generated by the linker, so
- we have no good way to determine at compile time what to do. */
-
-#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
- do { \
- if ((FS)->regs.reg[2].how == REG_UNSAVED) \
- { \
- unsigned int *insn \
- = (unsigned int *) \
- _Unwind_GetGR ((CTX), LINK_REGISTER_REGNUM); \
- if (*insn == 0xE8410028) \
- _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \
- } \
- } while (0)
-
/* If PC is at a sigreturn trampoline, return a pointer to the
regs. Otherwise return NULL. */
-#define PPC_LINUX_GET_REGS(CONTEXT) \
-({ \
- const unsigned char *pc = (CONTEXT)->ra; \
- struct gcc_regs *regs = NULL; \
- \
- /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */ \
- /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */ \
- if (*(unsigned int *) (pc + 0) != 0x38210000 + SIGNAL_FRAMESIZE \
- || *(unsigned int *) (pc + 8) != 0x44000002) \
- ; \
- else if (*(unsigned int *) (pc + 4) == 0x38000077) \
- { \
- struct sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- unsigned long pad[7]; \
- struct gcc_regs *regs; \
- } *frame = (struct sigframe *) (CONTEXT)->cfa; \
- regs = frame->regs; \
- } \
- else if (*(unsigned int *) (pc + 4) == 0x380000AC) \
- { \
- /* This works for 2.4 kernels, but not for 2.6 kernels with vdso \
- because pc isn't pointing into the stack. Can be removed when \
- no one is running 2.4.19 or 2.4.20, the first two ppc64 \
- kernels released. */ \
- struct rt_sigframe_24 { \
- int tramp[6]; \
- void *pinfo; \
- struct gcc_ucontext *puc; \
- } *frame24 = (struct rt_sigframe_24 *) pc; \
- \
- /* Test for magic value in *puc of vdso. */ \
- if ((long) frame24->puc != -21 * 8) \
- regs = frame24->puc->regs; \
- else \
- { \
- /* This works for 2.4.21 and later kernels. */ \
- struct rt_sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- struct gcc_ucontext uc; \
- unsigned long pad[2]; \
- int tramp[6]; \
- void *pinfo; \
- struct gcc_ucontext *puc; \
- } *frame = (struct rt_sigframe *) (CONTEXT)->cfa; \
- regs = frame->uc.regs; \
- } \
- } \
- regs; \
-})
-
-#define LINUX_HWCAP_DEFAULT 0xc0000000
-
-#define PPC_LINUX_VREGS(REGS) (REGS)->vp
+static struct gcc_regs *
+get_regs (struct _Unwind_Context *context)
+{
+ const unsigned char *pc = context->ra;
+
+ /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */
+ /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */
+ if (*(unsigned int *) (pc + 0) != 0x38210000 + SIGNAL_FRAMESIZE
+ || *(unsigned int *) (pc + 8) != 0x44000002)
+ return NULL;
+ if (*(unsigned int *) (pc + 4) == 0x38000077)
+ {
+ struct sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ unsigned long pad[7];
+ struct gcc_regs *regs;
+ } *frame = (struct sigframe *) context->cfa;
+ return frame->regs;
+ }
+ else if (*(unsigned int *) (pc + 4) == 0x380000AC)
+ {
+ /* This works for 2.4 kernels, but not for 2.6 kernels with vdso
+ because pc isn't pointing into the stack. Can be removed when
+ no one is running 2.4.19 or 2.4.20, the first two ppc64
+ kernels released. */
+ struct rt_sigframe_24 {
+ int tramp[6];
+ void *pinfo;
+ struct gcc_ucontext *puc;
+ } *frame24 = (struct rt_sigframe_24 *) pc;
+
+ /* Test for magic value in *puc of vdso. */
+ if ((long) frame24->puc != -21 * 8)
+ return frame24->puc->regs;
+ else
+ {
+ /* This works for 2.4.21 and later kernels. */
+ struct rt_sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ struct gcc_ucontext uc;
+ unsigned long pad[2];
+ int tramp[6];
+ void *pinfo;
+ struct gcc_ucontext *puc;
+ } *frame = (struct rt_sigframe *) context->cfa;
+ return frame->uc.regs;
+ }
+ }
+ return NULL;
+}
#else /* !__powerpc64__ */
enum { SIGNAL_FRAMESIZE = 64 };
-#define PPC_LINUX_GET_REGS(CONTEXT) \
-({ \
- const unsigned char *pc = (CONTEXT)->ra; \
- struct gcc_regs *regs = NULL; \
- \
- /* li r0, 0x7777; sc (sigreturn old) */ \
- /* li r0, 0x0077; sc (sigreturn new) */ \
- /* li r0, 0x6666; sc (rt_sigreturn old) */ \
- /* li r0, 0x00AC; sc (rt_sigreturn new) */ \
- if (*(unsigned int *) (pc + 4) != 0x44000002) \
- ; \
- else if (*(unsigned int *) (pc + 0) == 0x38007777 \
- || *(unsigned int *) (pc + 0) == 0x38000077) \
- { \
- struct sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- unsigned long pad[7]; \
- struct gcc_regs *regs; \
- } *frame = (struct sigframe *) (CONTEXT)->cfa; \
- regs = frame->regs; \
- } \
- else if (*(unsigned int *) (pc + 0) == 0x38006666 \
- || *(unsigned int *) (pc + 0) == 0x380000AC) \
- { \
- struct rt_sigframe { \
- char gap[SIGNAL_FRAMESIZE + 16]; \
- char siginfo[128]; \
- struct gcc_ucontext uc; \
- } *frame = (struct rt_sigframe *) (CONTEXT)->cfa; \
- regs = frame->uc.regs; \
- } \
- regs; \
-})
-
-#define LINUX_HWCAP_DEFAULT 0x80000000
-
-#define PPC_LINUX_VREGS(REGS) &(REGS)->vregs
+static struct gcc_regs *
+get_regs (struct _Unwind_Context *context)
+{
+ const unsigned char *pc = context->ra;
+ /* li r0, 0x7777; sc (sigreturn old) */
+ /* li r0, 0x0077; sc (sigreturn new) */
+ /* li r0, 0x6666; sc (rt_sigreturn old) */
+ /* li r0, 0x00AC; sc (rt_sigreturn new) */
+ if (*(unsigned int *) (pc + 4) != 0x44000002)
+ return NULL;
+ if (*(unsigned int *) (pc + 0) == 0x38007777
+ || *(unsigned int *) (pc + 0) == 0x38000077)
+ {
+ struct sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ unsigned long pad[7];
+ struct gcc_regs *regs;
+ } *frame = (struct sigframe *) context->cfa;
+ return frame->regs;
+ }
+ else if (*(unsigned int *) (pc + 0) == 0x38006666
+ || *(unsigned int *) (pc + 0) == 0x380000AC)
+ {
+ struct rt_sigframe {
+ char gap[SIGNAL_FRAMESIZE + 16];
+ char siginfo[128];
+ struct gcc_ucontext uc;
+ } *frame = (struct rt_sigframe *) context->cfa;
+ return frame->uc.regs;
+ }
+ return NULL;
+}
#endif
+/* Find an entry in the process auxiliary vector. The canonical way to
+ test for VMX is to look at AT_HWCAP. */
+
+static long
+ppc_linux_aux_vector (long which)
+{
+ /* __libc_stack_end holds the original stack passed to a process. */
+ extern long *__libc_stack_end;
+ long argc;
+ char **argv;
+ char **envp;
+ struct auxv
+ {
+ long a_type;
+ long a_val;
+ } *auxp;
+
+ /* The Linux kernel puts argc first on the stack. */
+ argc = __libc_stack_end[0];
+ /* Followed by argv, NULL terminated. */
+ argv = (char **) __libc_stack_end + 1;
+ /* Followed by environment string pointers, NULL terminated. */
+ envp = argv + argc + 1;
+ while (*envp++)
+ continue;
+ /* Followed by the aux vector, zero terminated. */
+ for (auxp = (struct auxv *) envp; auxp->a_type != 0; ++auxp)
+ if (auxp->a_type == which)
+ return auxp->a_val;
+ return 0;
+}
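
A usage sketch for the helper above, assuming it sits next to the
fallback unwinder in this file (AT_HWCAP is aux-vector type 16, as in
the removed macro; the feature masks are the glibc PPC_FEATURE values
the old code tested as raw constants):

#define AT_HWCAP                16
#define PPC_FEATURE_HAS_FPU     0x08000000
#define PPC_FEATURE_HAS_ALTIVEC 0x10000000

static long hwcap;

/* Cache AT_HWCAP once; the unwinder then tests the FPU and AltiVec
   bits before describing FPR/VR save locations.  */
static void
init_hwcap (void)
{
  if (hwcap == 0)
    hwcap = ppc_linux_aux_vector (AT_HWCAP);
}
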
+
/* Do code reading to identify a signal frame, and set the frame
state data appropriately. See unwind-dw2.c for the structs. */
-#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
- do { \
- static long hwcap = 0; \
- struct gcc_regs *regs = PPC_LINUX_GET_REGS (CONTEXT); \
- long new_cfa; \
- int i; \
- \
- if (regs == NULL) \
- break; \
- \
- new_cfa = regs->gpr[STACK_POINTER_REGNUM]; \
- (FS)->cfa_how = CFA_REG_OFFSET; \
- (FS)->cfa_reg = STACK_POINTER_REGNUM; \
- (FS)->cfa_offset = new_cfa - (long) (CONTEXT)->cfa; \
- \
- for (i = 0; i < 32; i++) \
- if (i != STACK_POINTER_REGNUM) \
- { \
- (FS)->regs.reg[i].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i].loc.offset \
- = (long) &regs->gpr[i] - new_cfa; \
- } \
- \
- (FS)->regs.reg[CR2_REGNO].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[CR2_REGNO].loc.offset \
- = (long) &regs->ccr - new_cfa; \
- \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
- = (long) &regs->link - new_cfa; \
- \
- (FS)->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[ARG_POINTER_REGNUM].loc.offset \
- = (long) &regs->nip - new_cfa; \
- (FS)->retaddr_column = ARG_POINTER_REGNUM; \
- \
- if (hwcap == 0) \
- { \
- /* __libc_stack_end holds the original stack passed to a \
- process. */ \
- extern long *__libc_stack_end; \
- long argc; \
- char **argv; \
- char **envp; \
- struct auxv \
- { \
- long a_type; \
- long a_val; \
- } *auxp; \
- \
- /* The Linux kernel puts argc first on the stack. */ \
- argc = __libc_stack_end[0]; \
- /* Followed by argv, NULL terminated. */ \
- argv = (char **) __libc_stack_end + 1; \
- /* Followed by environment string pointers, NULL terminated. */ \
- envp = argv + argc + 1; \
- while (*envp++) \
- continue; \
- /* Followed by the aux vector, zero terminated. */ \
- for (auxp = (struct auxv *) envp; auxp->a_type != 0; ++auxp) \
- if (auxp->a_type == 16) \
- { \
- hwcap = auxp->a_val; \
- break; \
- } \
- \
- /* These will already be set if we found AT_HWCAP. A non-zero \
- value stops us looking again if for some reason we couldn't \
- find AT_HWCAP. */ \
- hwcap |= LINUX_HWCAP_DEFAULT; \
- } \
- \
- /* If we have a FPU... */ \
- if (hwcap & 0x08000000) \
- for (i = 0; i < 32; i++) \
- { \
- (FS)->regs.reg[i + 32].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i + 32].loc.offset \
- = (long) &regs->fpr[i] - new_cfa; \
- } \
- \
- /* If we have a VMX unit... */ \
- if (hwcap & 0x10000000) \
- { \
- struct gcc_vregs *vregs; \
- vregs = PPC_LINUX_VREGS (regs); \
- if (regs->msr & (1 << 25)) \
- { \
- for (i = 0; i < 32; i++) \
- { \
- (FS)->regs.reg[i + FIRST_ALTIVEC_REGNO].how \
- = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i + FIRST_ALTIVEC_REGNO].loc.offset \
- = (long) &vregs[i] - new_cfa; \
- } \
- \
- (FS)->regs.reg[VSCR_REGNO].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[VSCR_REGNO].loc.offset \
- = (long) &vregs->vscr - new_cfa; \
- } \
- \
- (FS)->regs.reg[VRSAVE_REGNO].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[VRSAVE_REGNO].loc.offset \
- = (long) &vregs->vsave - new_cfa; \
- } \
- \
- goto SUCCESS; \
- } while (0)
+#define MD_FALLBACK_FRAME_STATE_FOR ppc_fallback_frame_state
+
+static _Unwind_Reason_Code
+ppc_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ static long hwcap = 0;
+ struct gcc_regs *regs = get_regs (context);
+ long new_cfa;
+ int i;
+
+ if (regs == NULL)
+ return _URC_END_OF_STACK;
+
+ new_cfa = regs->gpr[STACK_POINTER_REGNUM];
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->cfa_reg = STACK_POINTER_REGNUM;
+ fs->cfa_offset = new_cfa - (long) context->cfa;
+
+ for (i = 0; i < 32; i++)
+ if (i != STACK_POINTER_REGNUM)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = (long) &regs->gpr[i] - new_cfa;
+ }
+
+ fs->regs.reg[CR2_REGNO].how = REG_SAVED_OFFSET;
+ fs->regs.reg[CR2_REGNO].loc.offset = (long) &regs->ccr - new_cfa;
+
+ fs->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET;
+ fs->regs.reg[LINK_REGISTER_REGNUM].loc.offset = (long) &regs->link - new_cfa;
+
+ fs->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET;
+ fs->regs.reg[ARG_POINTER_REGNUM].loc.offset = (long) &regs->nip - new_cfa;
+ fs->retaddr_column = ARG_POINTER_REGNUM;
+ fs->signal_frame = 1;
+
+ if (hwcap == 0)
+ {
+ hwcap = ppc_linux_aux_vector (16);
+ /* These will already be set if we found AT_HWCAP. A nonzero
+ value stops us looking again if for some reason we couldn't
+ find AT_HWCAP. */
+#ifdef __powerpc64__
+ hwcap |= 0xc0000000;
+#else
+ hwcap |= 0x80000000;
+#endif
+ }
+
+  /* If we have an FPU... */
+ if (hwcap & 0x08000000)
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i + 32].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i + 32].loc.offset = (long) &regs->fpr[i] - new_cfa;
+ }
+
+ /* If we have a VMX unit... */
+ if (hwcap & 0x10000000)
+ {
+ struct gcc_vregs *vregs;
+#ifdef __powerpc64__
+ vregs = regs->vp;
+#else
+ vregs = &regs->vregs;
+#endif
+ if (regs->msr & (1 << 25))
+ {
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i + FIRST_ALTIVEC_REGNO].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i + FIRST_ALTIVEC_REGNO].loc.offset
+ = (long) &vregs[i] - new_cfa;
+ }
+
+ fs->regs.reg[VSCR_REGNO].how = REG_SAVED_OFFSET;
+ fs->regs.reg[VSCR_REGNO].loc.offset = (long) &vregs->vscr - new_cfa;
+ }
+
+ fs->regs.reg[VRSAVE_REGNO].how = REG_SAVED_OFFSET;
+ fs->regs.reg[VRSAVE_REGNO].loc.offset = (long) &vregs->vsave - new_cfa;
+ }
+
+ return _URC_NO_REASON;
+}
+
+#define MD_FROB_UPDATE_CONTEXT frob_update_context
+
+static void
+frob_update_context (struct _Unwind_Context *context,
+                     _Unwind_FrameState *fs ATTRIBUTE_UNUSED)
+{
+ const unsigned int *pc = (const unsigned int *) context->ra;
+
+  /* Fix up for 2.6.12 - 2.6.16 Linux kernels that have vDSO, but don't
+     have the S flag in it. */
+#ifdef __powerpc64__
+ /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */
+ /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */
+ if (pc[0] == 0x38210000 + SIGNAL_FRAMESIZE
+ && (pc[1] == 0x38000077 || pc[1] == 0x380000AC)
+ && pc[2] == 0x44000002)
+ _Unwind_SetSignalFrame (context, 1);
+#else
+ /* li r0, 0x7777; sc (sigreturn old) */
+ /* li r0, 0x0077; sc (sigreturn new) */
+ /* li r0, 0x6666; sc (rt_sigreturn old) */
+ /* li r0, 0x00AC; sc (rt_sigreturn new) */
+ if ((pc[0] == 0x38007777 || pc[0] == 0x38000077
+ || pc[0] == 0x38006666 || pc[0] == 0x380000AC)
+ && pc[1] == 0x44000002)
+ _Unwind_SetSignalFrame (context, 1);
+#endif
+
+#ifdef __powerpc64__
+ if (fs->regs.reg[2].how == REG_UNSAVED)
+ {
+ /* If the current unwind info (FS) does not contain explicit info
+ saving R2, then we have to do a minor amount of code reading to
+ figure out if it was saved. The big problem here is that the
+ code that does the save/restore is generated by the linker, so
+ we have no good way to determine at compile time what to do. */
+ unsigned int *insn
+ = (unsigned int *) _Unwind_GetGR (context, LINK_REGISTER_REGNUM);
+ if (*insn == 0xE8410028)
+ _Unwind_SetGRPtr (context, 2, context->cfa + 40);
+ }
+#endif
+}
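
The magic constant 0xE8410028 tested in frob_update_context is the encoding of ld r2,40(r1), the TOC restore that a linker-generated call stub places after the call; offset 40 is the TOC save slot in the 64-bit ELF ABI frame, matching the context->cfa + 40 passed to _Unwind_SetGRPtr. A sketch verifying the decomposition:

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t insn = (58u << 26)   /* primary opcode 58: ld      */
                  | (2u << 21)    /* destination register r2    */
                  | (1u << 16)    /* base register r1           */
                  | 40u;          /* displacement 40 (DS field) */
    assert (insn == 0xE8410028);  /* ld r2,40(r1)               */
    return 0;
  }
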
diff --git a/contrib/gcc/config/rs6000/linux.h b/contrib/gcc/config/rs6000/linux.h
index 84cdeed..3df356f 100644
--- a/contrib/gcc/config/rs6000/linux.h
+++ b/contrib/gcc/config/rs6000/linux.h
@@ -1,7 +1,7 @@
/* Definitions of target machine for GNU compiler,
for PowerPC machines running Linux.
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Michael Meissner (meissner@cygnus.com).
This file is part of GCC.
@@ -18,8 +18,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef MD_EXEC_PREFIX
#undef MD_STARTFILE_PREFIX
@@ -28,20 +28,23 @@
process. */
#define OS_MISSING_POWERPC64 1
+/* We use glibc _mcount for profiling. */
+#define NO_PROFILE_COUNTERS 1
+
/* glibc has float and long double forms of math functions. */
#undef TARGET_C99_FUNCTIONS
-#define TARGET_C99_FUNCTIONS 1
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define_std ("PPC"); \
- builtin_define_std ("powerpc"); \
- builtin_assert ("cpu=powerpc"); \
- builtin_assert ("machine=powerpc"); \
- TARGET_OS_SYSV_CPP_BUILTINS (); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define_std ("powerpc"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
while (0)
#undef CPP_OS_DEFAULT_SPEC
@@ -74,6 +77,11 @@
#define LINK_GCC_C_SEQUENCE_SPEC \
"%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU/Linux)");
@@ -102,8 +110,18 @@
#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
-#define TARGET_HAS_F_SETLKW
+#define TARGET_POSIX_IO
+
+#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h"
+
+#ifdef TARGET_LIBC_PROVIDES_SSP
+/* ppc32 glibc provides __stack_chk_guard in -0x7008(2). */
+#define TARGET_THREAD_SSP_OFFSET -0x7008
+#endif
+
+#define POWERPC_LINUX
-#ifdef IN_LIBGCC2
-#include "config/rs6000/linux-unwind.h"
+/* ppc linux has 128-bit long double support in glibc 2.4 and later. */
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
#endif
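
On ppc32 GNU/Linux r2 is the thread pointer, and glibc keeps the stack-protector canary at a fixed negative offset from it; the TARGET_THREAD_SSP_OFFSET define above lets -fstack-protector code load the guard directly rather than through the __stack_chk_guard symbol. A purely illustrative sketch of what that load amounts to, assuming a ppc32 glibc that provides the canary at -0x7008(r2) as the define states:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned long guard;
    /* Assumption: ppc32 glibc with the canary at -0x7008 from the
       thread pointer in r2, per TARGET_THREAD_SSP_OFFSET above.  */
    __asm__ ("lwz %0,-0x7008(2)" : "=r" (guard));
    printf ("stack guard = %#lx\n", guard);
    return 0;
  }
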
diff --git a/contrib/gcc/config/rs6000/linux64.h b/contrib/gcc/config/rs6000/linux64.h
index 84e13a1..f52b245 100644
--- a/contrib/gcc/config/rs6000/linux64.h
+++ b/contrib/gcc/config/rs6000/linux64.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for 64 bit PowerPC linux.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This file is part of GCC.
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#ifndef RS6000_BI_ARCH
@@ -50,8 +50,17 @@
#undef TARGET_AIX
#define TARGET_AIX TARGET_64BIT
-#undef PROCESSOR_DEFAULT64
-#define PROCESSOR_DEFAULT64 PROCESSOR_PPC630
+#ifdef HAVE_LD_NO_DOT_SYMS
+/* New ABI uses a local sym for the function entry point. */
+extern int dot_symbols;
+#undef DOT_SYMBOLS
+#define DOT_SYMBOLS dot_symbols
+#endif
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_POWER4
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
/* We don't need to generate entries in .fixup, except when
-mrelocatable or -mrelocatable-lib is given. */
@@ -60,7 +69,7 @@
(target_flags & target_flags_explicit & MASK_RELOCATABLE)
#undef RS6000_ABI_NAME
-#define RS6000_ABI_NAME (TARGET_64BIT ? "aixdesc" : "sysv")
+#define RS6000_ABI_NAME "linux"
#define INVALID_64BIT "-m%s not supported in this configuration"
#define INVALID_32BIT INVALID_64BIT
@@ -69,7 +78,7 @@
#define SUBSUBTARGET_OVERRIDE_OPTIONS \
do \
{ \
- if (rs6000_alignment_string == 0) \
+ if (!rs6000_explicit_options.alignment) \
rs6000_alignment_flags = MASK_ALIGN_NATURAL; \
if (TARGET_64BIT) \
{ \
@@ -78,6 +87,7 @@
rs6000_current_abi = ABI_AIX; \
error (INVALID_64BIT, "call"); \
} \
+ dot_symbols = !strcmp (rs6000_abi_name, "aixdesc"); \
if (target_flags & MASK_RELOCATABLE) \
{ \
target_flags &= ~MASK_RELOCATABLE; \
@@ -93,7 +103,7 @@
target_flags &= ~MASK_PROTOTYPE; \
error (INVALID_64BIT, "prototype"); \
} \
- if ((target_flags & MASK_POWERPC64) == 0) \
+ if ((target_flags & MASK_POWERPC64) == 0) \
{ \
target_flags |= MASK_POWERPC64; \
error ("-m64 requires a PowerPC64 cpu"); \
@@ -127,16 +137,16 @@
#ifndef RS6000_BI_ARCH
#define ASM_DEFAULT_SPEC "-mppc64"
-#define ASM_SPEC "%(asm_spec64) %(asm_spec_common)"
+#define ASM_SPEC "%(asm_spec64) %(asm_spec_common)"
#define LINK_OS_LINUX_SPEC "%(link_os_linux_spec64)"
#else
#if DEFAULT_ARCH64_P
#define ASM_DEFAULT_SPEC "-mppc%{!m32:64}"
-#define ASM_SPEC "%{m32:%(asm_spec32)}%{!m32:%(asm_spec64)} %(asm_spec_common)"
+#define ASM_SPEC "%{m32:%(asm_spec32)}%{!m32:%(asm_spec64)} %(asm_spec_common)"
#define LINK_OS_LINUX_SPEC "%{m32:%(link_os_linux_spec32)}%{!m32:%(link_os_linux_spec64)}"
#else
#define ASM_DEFAULT_SPEC "-mppc%{m64:64}"
-#define ASM_SPEC "%{!m64:%(asm_spec32)}%{m64:%(asm_spec64)} %(asm_spec_common)"
+#define ASM_SPEC "%{!m64:%(asm_spec32)}%{m64:%(asm_spec64)} %(asm_spec_common)"
#define LINK_OS_LINUX_SPEC "%{!m64:%(link_os_linux_spec32)}%{m64:%(link_os_linux_spec64)}"
#endif
#endif
@@ -196,24 +206,8 @@
#endif
-#define MASK_PROFILE_KERNEL 0x00100000
-
-/* Non-standard profiling for kernels, which just saves LR then calls
- _mcount without worrying about arg saves. The idea is to change
- the function prologue as little as possible as it isn't easy to
- account for arg save/restore code added just for _mcount. */
-#define TARGET_PROFILE_KERNEL (target_flags & MASK_PROFILE_KERNEL)
-
-/* Override sysv4.h. */
-#undef EXTRA_SUBTARGET_SWITCHES
-#define EXTRA_SUBTARGET_SWITCHES \
- {"profile-kernel", MASK_PROFILE_KERNEL, \
- N_("Call mcount for profiling before a function prologue") }, \
- {"no-profile-kernel", -MASK_PROFILE_KERNEL, \
- N_("Call mcount for profiling after a function prologue") },
-
/* We use glibc _mcount for profiling. */
-#define NO_PROFILE_COUNTERS TARGET_64BIT
+#define NO_PROFILE_COUNTERS 1
#define PROFILE_HOOK(LABEL) \
do { if (TARGET_64BIT) output_profile_hook (LABEL); } while (0)
@@ -234,13 +228,11 @@
the first field is an FP double, only if in power alignment mode. */
#undef ROUND_TYPE_ALIGN
#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
- ((TARGET_ALTIVEC && TREE_CODE (STRUCT) == VECTOR_TYPE) \
- ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
- : (TARGET_64BIT \
- && (TREE_CODE (STRUCT) == RECORD_TYPE \
- || TREE_CODE (STRUCT) == UNION_TYPE \
- || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
- && TARGET_ALIGN_NATURAL == 0) \
+ ((TARGET_64BIT \
+ && (TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0) \
? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
: MAX ((COMPUTED), (SPECIFIED)))
@@ -258,7 +250,7 @@
than a doubleword should be padded upward or downward. You could
reasonably assume that they follow the normal rules for structure
layout treating the parameter area as any other block of memory,
- then map the reg param area to registers. ie. pad updard.
+ then map the reg param area to registers. i.e. pad upward.
Setting both of the following defines results in this behavior.
Setting just the first one will result in aggregates that fit in a
doubleword being padded downward, and others being padded upward.
@@ -267,16 +259,6 @@
#define AGGREGATE_PADDING_FIXED TARGET_64BIT
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
-/* We don't want anything in the reg parm area being passed on the
- stack. */
-#define MUST_PASS_IN_STACK(MODE, TYPE) \
- ((TARGET_64BIT \
- && (TYPE) != 0 \
- && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
- || TREE_ADDRESSABLE (TYPE))) \
- || (!TARGET_64BIT \
- && default_must_pass_in_stack ((MODE), (TYPE))))
-
/* Specify padding for the last element of a block move between
registers and memory. FIRST is nonzero if this is the only
element. */
@@ -301,10 +283,10 @@
/* glibc has float and long double forms of math functions. */
#undef TARGET_C99_FUNCTIONS
-#define TARGET_C99_FUNCTIONS 1
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
+#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
if (TARGET_64BIT) \
@@ -313,7 +295,6 @@
builtin_define ("__PPC64__"); \
builtin_define ("__powerpc__"); \
builtin_define ("__powerpc64__"); \
- builtin_define ("__PIC__"); \
builtin_assert ("cpu=powerpc64"); \
builtin_assert ("machine=powerpc64"); \
} \
@@ -355,13 +336,28 @@
#undef LINK_OS_DEFAULT_SPEC
#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
+#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1"
+#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld64.so.1"
+#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0"
+#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0"
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+#define LINUX_DYNAMIC_LINKER32 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER32, UCLIBC_DYNAMIC_LINKER32)
+#define LINUX_DYNAMIC_LINKER64 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER64, UCLIBC_DYNAMIC_LINKER64)
+
+
#define LINK_OS_LINUX_SPEC32 "-m elf32ppclinux %{!shared: %{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER32 "}}}"
#define LINK_OS_LINUX_SPEC64 "-m elf64ppc %{!shared: %{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib64/ld64.so.1}}}"
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER64 "}}}"
#undef TOC_SECTION_ASM_OP
#define TOC_SECTION_ASM_OP \
@@ -412,11 +408,19 @@
object files, each potentially with a different TOC pointer. For
that reason, place a nop after the call so that the linker can
restore the TOC pointer if a TOC adjusting call stub is needed. */
+#if DOT_SYMBOLS
#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
asm (SECTION_OP "\n" \
" bl ." #FUNC "\n" \
" nop\n" \
" .previous");
+#else
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n" \
+" bl " #FUNC "\n" \
+" nop\n" \
+" .previous");
+#endif
#endif
/* FP save and restore routines. */
@@ -441,13 +445,11 @@
if (!flag_inhibit_size_directive) \
{ \
fputs ("\t.size\t", (FILE)); \
- if (TARGET_64BIT) \
+ if (TARGET_64BIT && DOT_SYMBOLS) \
putc ('.', (FILE)); \
assemble_name ((FILE), (FNAME)); \
fputs (",.-", (FILE)); \
- if (TARGET_64BIT) \
- putc ('.', (FILE)); \
- assemble_name ((FILE), (FNAME)); \
+ rs6000_output_function_entry (FILE, FNAME); \
putc ('\n', (FILE)); \
} \
} \
@@ -476,72 +478,14 @@
&& ((TARGET_64BIT \
&& (TARGET_POWERPC64 \
|| TARGET_MINIMAL_TOC \
- || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& ! TARGET_NO_FP_IN_TOC))) \
|| (!TARGET_64BIT \
&& !TARGET_NO_FP_IN_TOC \
&& !TARGET_RELOCATABLE \
- && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& BITS_PER_WORD == HOST_BITS_PER_INT)))))
-/* This is the same as the dbxelf.h version, except that we need to
- use the function code label, not the function descriptor. */
-#undef ASM_OUTPUT_SOURCE_LINE
-#define ASM_OUTPUT_SOURCE_LINE(FILE, LINE, COUNTER) \
-do \
- { \
- char temp[256]; \
- ASM_GENERATE_INTERNAL_LABEL (temp, "LM", COUNTER); \
- fprintf (FILE, "\t.stabn 68,0,%d,", LINE); \
- assemble_name (FILE, temp); \
- putc ('-', FILE); \
- if (TARGET_64BIT) \
- putc ('.', FILE); \
- assemble_name (FILE, \
- XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));\
- putc ('\n', FILE); \
- (*targetm.asm_out.internal_label) (FILE, "LM", COUNTER); \
- } \
-while (0)
-
-/* Similarly, we want the function code label here. */
-#define DBX_OUTPUT_BRAC(FILE, NAME, BRAC) \
- do \
- { \
- const char *flab; \
- fprintf (FILE, "%s%d,0,0,", ASM_STABN_OP, BRAC); \
- assemble_name (FILE, NAME); \
- putc ('-', FILE); \
- if (current_function_func_begin_label != NULL_TREE) \
- flab = IDENTIFIER_POINTER (current_function_func_begin_label); \
- else \
- { \
- if (TARGET_64BIT) \
- putc ('.', FILE); \
- flab = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); \
- } \
- assemble_name (FILE, flab); \
- putc ('\n', FILE); \
- } \
- while (0)
-
-#define DBX_OUTPUT_LBRAC(FILE, NAME) DBX_OUTPUT_BRAC (FILE, NAME, N_LBRAC)
-#define DBX_OUTPUT_RBRAC(FILE, NAME) DBX_OUTPUT_BRAC (FILE, NAME, N_RBRAC)
-
-/* Another case where we want the dot name. */
-#define DBX_OUTPUT_NFUN(FILE, LSCOPE, DECL) \
- do \
- { \
- fprintf (FILE, "%s\"\",%d,0,0,", ASM_STABS_OP, N_FUN); \
- assemble_name (FILE, LSCOPE); \
- putc ('-', FILE); \
- if (TARGET_64BIT) \
- putc ('.', FILE); \
- assemble_name (FILE, XSTR (XEXP (DECL_RTL (DECL), 0), 0)); \
- putc ('\n', FILE); \
- } \
- while (0)
-
/* Select a format to encode pointers in exception handling data. CODE
is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
true if the symbol may be affected by dynamic relocations. */
@@ -559,11 +503,27 @@ while (0)
#define TARGET_ASM_FILE_END rs6000_elf_end_indicate_exec_stack
-#define TARGET_HAS_F_SETLKW
+#define TARGET_POSIX_IO
#define LINK_GCC_C_SEQUENCE_SPEC \
"%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
-#ifdef IN_LIBGCC2
-#include "config/rs6000/linux-unwind.h"
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
+#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h"
+
+#ifdef TARGET_LIBC_PROVIDES_SSP
+/* ppc32 glibc provides __stack_chk_guard in -0x7008(2),
+ ppc64 glibc provides it at -0x7010(13). */
+#define TARGET_THREAD_SSP_OFFSET (TARGET_64BIT ? -0x7010 : -0x7008)
+#endif
+
+#define POWERPC_LINUX
+
+/* ppc{32,64} linux has 128-bit long double support in glibc 2.4 and later. */
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
#endif
diff --git a/contrib/gcc/config/rs6000/linux64.opt b/contrib/gcc/config/rs6000/linux64.opt
new file mode 100644
index 0000000..ebd3384
--- /dev/null
+++ b/contrib/gcc/config/rs6000/linux64.opt
@@ -0,0 +1,25 @@
+; Options for 64-bit PowerPC Linux.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mprofile-kernel
+Target Report Mask(PROFILE_KERNEL)
+Call mcount for profiling before a function prologue
diff --git a/contrib/gcc/config/rs6000/linuxaltivec.h b/contrib/gcc/config/rs6000/linuxaltivec.h
index 73ac864..181bb81 100644
--- a/contrib/gcc/config/rs6000/linuxaltivec.h
+++ b/contrib/gcc/config/rs6000/linuxaltivec.h
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC AltiVec GNU/Linux)");
diff --git a/contrib/gcc/config/rs6000/linuxspe.h b/contrib/gcc/config/rs6000/linuxspe.h
index 59eb831..fd7d20e 100644
--- a/contrib/gcc/config/rs6000/linuxspe.h
+++ b/contrib/gcc/config/rs6000/linuxspe.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for PowerPC e500 machines running GNU/Linux.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
Contributed by Aldy Hernandez (aldy@quesejoda.com).
This file is part of GCC.
@@ -17,43 +17,49 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC E500 GNU/Linux)");
/* Override rs6000.h and sysv4.h definition. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_STRICT_ALIGN)
#undef TARGET_SPE_ABI
#undef TARGET_SPE
#undef TARGET_E500
#undef TARGET_ISEL
#undef TARGET_FPRS
+#undef TARGET_E500_SINGLE
+#undef TARGET_E500_DOUBLE
#define TARGET_SPE_ABI rs6000_spe_abi
#define TARGET_SPE rs6000_spe
#define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540)
#define TARGET_ISEL rs6000_isel
-#define TARGET_FPRS (!rs6000_float_gprs)
+#define TARGET_FPRS (rs6000_float_gprs == 0)
+#define TARGET_E500_SINGLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 1)
+#define TARGET_E500_DOUBLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 2)
#undef SUBSUBTARGET_OVERRIDE_OPTIONS
#define SUBSUBTARGET_OVERRIDE_OPTIONS \
if (rs6000_select[1].string == NULL) \
rs6000_cpu = PROCESSOR_PPC8540; \
- if (rs6000_abi_string == NULL || strstr (rs6000_abi_string, "spe") == NULL) \
+ if (!rs6000_explicit_options.abi) \
rs6000_spe_abi = 1; \
- if (rs6000_float_gprs_string == NULL) \
+ if (!rs6000_explicit_options.float_gprs) \
rs6000_float_gprs = 1; \
/* See note below. */ \
- /*if (rs6000_long_double_size_string == NULL)*/ \
+ /*if (!rs6000_explicit_options.long_double)*/ \
/* rs6000_long_double_type_size = 128;*/ \
- if (rs6000_spe_string == NULL) \
+ if (!rs6000_explicit_options.spe) \
rs6000_spe = 1; \
- if (rs6000_isel_string == NULL) \
- rs6000_isel = 1
+ if (!rs6000_explicit_options.isel) \
+ rs6000_isel = 1; \
+ if (target_flags & MASK_64BIT) \
+ error ("-m64 not supported in this configuration")
/* The e500 ABI says that either long doubles are 128 bits, or if
implemented in any other size, the compiler/linker should error out.
diff --git a/contrib/gcc/config/rs6000/lynx.h b/contrib/gcc/config/rs6000/lynx.h
index b32b078..ab2d16f 100644
--- a/contrib/gcc/config/rs6000/lynx.h
+++ b/contrib/gcc/config/rs6000/lynx.h
@@ -1,6 +1,8 @@
/* Definitions for Rs6000 running LynxOS.
- Copyright (C) 1995, 1996, 2000, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 2000, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
Contributed by David Henkel-Wallace, Cygnus Support (gumby@cygnus.com)
+ Rewritten by Adam Nemet, LynuxWorks Inc.
This file is part of GCC.
@@ -16,72 +18,108 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-/* Print subsidiary information on the compiler version in use. */
-#define TARGET_VERSION fprintf (stderr, " (LynxOS-RS/6000)");
-
-/* LynxOS has signed chars, regardless of what most R/S 6000 systems do */
-#undef DEFAULT_SIGNED_CHAR
-#define DEFAULT_SIGNED_CHAR 1
-
-#undef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_assert ("cpu=rs6000"); \
- builtin_assert ("machine=rs6000"); \
- builtin_assert ("system=lynx"); \
- builtin_assert ("system=unix"); \
- builtin_define_std ("Lynx"); \
- builtin_define ("_IBMR2"); \
- builtin_define_std ("unix"); \
- builtin_define_std ("rs6000"); \
- builtin_define_std ("lynx"); \
- builtin_define_std ("LYNX"); \
- } \
- while (0)
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* Override the definition in sysv4.h. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (PowerPC/LynxOS)", stderr);
+
+/* Undefine the definition to enable the LynxOS default from the
+ top-level lynx.h. */
+
+#undef SUBTARGET_EXTRA_SPECS
+
+/* Get rid of the spec definitions from rs6000/sysv4.h. */
-#undef LINK_SPEC
-#define LINK_SPEC "-T0x10001000 -H0x1000 -D0x20000000 -btextro -bhalt:4 -bnodelcsect -bnso -bro -bnoglink %{v} %{b*}"
+#undef CPP_SPEC
+#define CPP_SPEC \
+"%{msoft-float: -D_SOFT_FLOAT} \
+ %(cpp_cpu) \
+ %(cpp_os_lynx)"
-#undef LIB_SPEC
-#define LIB_SPEC "%{mthreads:-L/lib/thread/} \
- %{msystem-v:-lc_v -lm.v} \
- %{!msystem-v:%{mposix:-lc_p} -lc -lm}"
+/* LynxOS only supports big-endian on PPC, so we override the
+   definition from sysv4.h.  Since the LynxOS 4.0 compiler was set to
+   return every structure in memory regardless of size, we have to
+   emulate the same behavior here by disabling SVR4 structure
+   returning. */
-#undef STARTFILE_SPEC
-#define STARTFILE_SPEC "%{p:%{mthreads:thread/pinit.o%s}%{!mthreads:pinit.o%s}}%{!p:%{msystem-v:vinit.o%s -e_start}%{!msystem-v:%{mthreads:thread/init.o%s}%{!mthreads:init.o%s}}}"
+#undef CC1_SPEC
+#define CC1_SPEC \
+"%{G*} %{mno-sdata:-msdata=none} \
+ %{maltivec:-mabi=altivec} \
+ -maix-struct-return"
+#undef ASM_SPEC
+#define ASM_SPEC \
+"%(asm_cpu) \
+ %{.s: %{mregnames} %{mno-regnames}} \
+ %{.S: %{mregnames} %{mno-regnames}}"
+
+#undef STARTFILE_SPEC
#undef ENDFILE_SPEC
+#undef LIB_SPEC
+#undef LINK_SPEC
+#define LINK_SPEC \
+"%{!msdata=none:%{G*}} %{msdata=none:-G0} \
+ %(link_os_lynx)"
+
+/* Override the definition from sysv4.h. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__BIG_ENDIAN__"); \
+ builtin_define ("__powerpc__"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ builtin_define ("__PPC__"); \
+ } \
+ while (0)
+
+/* Override the rs6000.h definition. */
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override the rs6000.h definition. */
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* LynxOS does not do anything with .fixup; also, avoid creating a
+   writable section for linkonce.r and linkonce.t. */
+
+#undef RELOCATABLE_NEEDS_FIXUP
+
+/* Override these from rs6000.h with the generic definition. */
+
+#undef SIZE_TYPE
+#undef ASM_OUTPUT_ALIGN
+#undef PREFERRED_DEBUGGING_TYPE
+
+/* The file rs6000.c defines TARGET_HAVE_TLS unconditionally to the
+   value of HAVE_AS_TLS.  HAVE_AS_TLS is true when gas support for TLS
+   is detected by configure.  Override the definition to false. */
+
+#undef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+
+#ifdef CRT_BEGIN
+/* This function is part of crtbegin*.o which is at the beginning of
+ the link and is called from .fini which is usually toward the end
+ of the executable. Make it longcall so that we don't limit the
+ text size of the executables to 32M. */
+
+static void __do_global_dtors_aux (void) __attribute__ ((longcall));
+#endif /* CRT_BEGIN */
+
+#ifdef CRT_END
+/* Similarly here.  This function resides in crtend*.o which is toward
+   the end of the link and is called from .init which is at the
+ beginning. */
-/* This can become more refined as we have more powerpc options. */
-#undef ASM_SPEC
-#define ASM_SPEC "-u %(asm_cpu)"
-
-#undef SUBTARGET_SWITCHES
-#define SUBTARGET_SWITCHES \
- {"threads", MASK_THREADS}, \
- {"posix", MASK_POSIX}, \
- {"system-v", MASK_SYSTEM_V},
-
-#undef SUBTARGET_OVERRIDE_OPTIONS
-#define SUBTARGET_OVERRIDE_OPTIONS \
-do { \
- if (TARGET_SYSTEM_V && profile_flag) \
- warning ("-msystem-v and -p are incompatible"); \
- if (TARGET_SYSTEM_V && TARGET_THREADS) \
- warning ("-msystem-v and -mthreads are incompatible"); \
-} while (0)
-
-/* For collect2 */
-#define OBJECT_FORMAT_NONE
-#undef OBJECT_FORMAT_COFF
-#undef MD_EXEC_PREFIX
-#undef REAL_LD_FILE_NAME
-#undef REAL_STRIP_FILE_NAME
-
-/* LynxOS doesn't have mcount. */
-#undef FUNCTION_PROFILER
-#define FUNCTION_PROFILER(file, profile_label_no)
+static void __do_global_ctors_aux (void) __attribute__ ((longcall));
+#endif /* CRT_END */
diff --git a/contrib/gcc/config/rs6000/mpc.md b/contrib/gcc/config/rs6000/mpc.md
index b95bba5..75e4752 100644
--- a/contrib/gcc/config/rs6000/mpc.md
+++ b/contrib/gcc/config/rs6000/mpc.md
@@ -1,5 +1,5 @@
;; Scheduling description for Motorola PowerPC processor cores.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
@@ -15,8 +15,8 @@
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "mpc,mpcfp")
(define_cpu_unit "iu_mpc,mciu_mpc" "mpc")
@@ -27,11 +27,12 @@
;; 505/801/821/823
(define_insn_reservation "mpccore-load" 2
- (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
(eq_attr "cpu" "mpccore"))
"lsu_mpc")
-(define_insn_reservation "mpccore-store" 1
+(define_insn_reservation "mpccore-store" 2
(and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
(eq_attr "cpu" "mpccore"))
"lsu_mpc")
@@ -46,6 +47,16 @@
(eq_attr "cpu" "mpccore"))
"iu_mpc")
+(define_insn_reservation "mpccore-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc")
+
+(define_insn_reservation "mpccore-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc,iu_mpc")
+
(define_insn_reservation "mpccore-imul" 2
(and (eq_attr "type" "imul,imul2,imul3,imul_compare")
(eq_attr "cpu" "mpccore"))
@@ -93,7 +104,7 @@
"bpu_mpc")
(define_insn_reservation "mpccore-jmpreg" 1
- (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr,mfcr,mtcr")
+ (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr,mfcr,mtcr,isync")
(eq_attr "cpu" "mpccore"))
"bpu_mpc")
diff --git a/contrib/gcc/config/rs6000/netbsd.h b/contrib/gcc/config/rs6000/netbsd.h
index e0c5196..151c232 100644
--- a/contrib/gcc/config/rs6000/netbsd.h
+++ b/contrib/gcc/config/rs6000/netbsd.h
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_OS_CPP_BUILTINS /* FIXME: sysv4.h should not define this! */
#define TARGET_OS_CPP_BUILTINS() \
diff --git a/contrib/gcc/config/rs6000/power4.md b/contrib/gcc/config/rs6000/power4.md
index fabc1de..53ac066 100644
--- a/contrib/gcc/config/rs6000/power4.md
+++ b/contrib/gcc/config/rs6000/power4.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM Power4 and PowerPC 970 processors.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
@@ -15,8 +15,8 @@
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
;; Sources: IBM Red Book and White Paper on POWER4
@@ -38,38 +38,37 @@
(define_reservation "lsq_power4"
"(du1_power4,lsu1_power4)\
|(du2_power4,lsu2_power4)\
- |(du3_power4,nothing,lsu2_power4)\
- |(du4_power4,nothing,lsu1_power4)")
+ |(du3_power4,lsu2_power4)\
+ |(du4_power4,lsu1_power4)")
(define_reservation "lsuq_power4"
"(du1_power4+du2_power4,lsu1_power4+iu2_power4)\
|(du2_power4+du3_power4,lsu2_power4+iu2_power4)\
|(du3_power4+du4_power4,lsu2_power4+iu1_power4)")
-; |(du2_power4+du3_power4,nothing,lsu2_power4,iu2_power4)
(define_reservation "iq_power4"
"(du1_power4,iu1_power4)\
|(du2_power4,iu2_power4)\
- |(du3_power4,nothing,iu2_power4)\
- |(du4_power4,nothing,iu1_power4)")
+ |(du3_power4,iu2_power4)\
+ |(du4_power4,iu1_power4)")
(define_reservation "fpq_power4"
"(du1_power4,fpu1_power4)\
|(du2_power4,fpu2_power4)\
- |(du3_power4,nothing,fpu2_power4)\
- |(du4_power4,nothing,fpu1_power4)")
+ |(du3_power4,fpu2_power4)\
+ |(du4_power4,fpu1_power4)")
(define_reservation "vq_power4"
"(du1_power4,vec_power4)\
|(du2_power4,vec_power4)\
- |(du3_power4,nothing,vec_power4)\
- |(du4_power4,nothing,vec_power4)")
+ |(du3_power4,vec_power4)\
+ |(du4_power4,vec_power4)")
(define_reservation "vpq_power4"
"(du1_power4,vecperm_power4)\
|(du2_power4,vecperm_power4)\
- |(du3_power4,nothing,vecperm_power4)\
- |(du4_power4,nothing,vecperm_power4)")
+ |(du3_power4,vecperm_power4)\
+ |(du4_power4,vecperm_power4)")
; Dispatch slots are allocated in order conforming to program order.
@@ -130,15 +129,15 @@
(eq_attr "cpu" "power4"))
"lsq_power4")
-(define_insn_reservation "power4-store" 1
+(define_insn_reservation "power4-store" 12
(and (eq_attr "type" "store")
(eq_attr "cpu" "power4"))
"(du1_power4,lsu1_power4,iu1_power4)\
|(du2_power4,lsu2_power4,iu2_power4)\
- |(du3_power4,lsu2_power4,nothing,iu2_power4)\
- |(du4_power4,lsu1_power4,nothing,iu1_power4)")
+ |(du3_power4,lsu2_power4,iu2_power4)\
+ |(du4_power4,lsu1_power4,iu1_power4)")
-(define_insn_reservation "power4-store-update" 1
+(define_insn_reservation "power4-store-update" 12
(and (eq_attr "type" "store_u")
(eq_attr "cpu" "power4"))
"(du1_power4+du2_power4,lsu1_power4+iu2_power4,iu1_power4)\
@@ -146,35 +145,40 @@
|(du3_power4+du4_power4,lsu2_power4+iu1_power4,iu2_power4)\
|(du3_power4+du4_power4,lsu2_power4,iu1_power4,iu2_power4)")
-(define_insn_reservation "power4-store-update-indexed" 1
+(define_insn_reservation "power4-store-update-indexed" 12
(and (eq_attr "type" "store_ux")
(eq_attr "cpu" "power4"))
"du1_power4+du2_power4+du3_power4+du4_power4,\
iu1_power4,lsu2_power4+iu2_power4,iu2_power4")
-(define_insn_reservation "power4-fpstore" 1
+(define_insn_reservation "power4-fpstore" 12
(and (eq_attr "type" "fpstore")
(eq_attr "cpu" "power4"))
"(du1_power4,lsu1_power4,fpu1_power4)\
|(du2_power4,lsu2_power4,fpu2_power4)\
- |(du3_power4,lsu2_power4,nothing,fpu2_power4)\
- |(du4_power4,lsu1_power4,nothing,fpu1_power4)")
+ |(du3_power4,lsu2_power4,fpu2_power4)\
+ |(du4_power4,lsu1_power4,fpu1_power4)")
-(define_insn_reservation "power4-fpstore-update" 1
+(define_insn_reservation "power4-fpstore-update" 12
(and (eq_attr "type" "fpstore_u,fpstore_ux")
(eq_attr "cpu" "power4"))
"(du1_power4+du2_power4,lsu1_power4+iu2_power4,fpu1_power4)\
|(du2_power4+du3_power4,lsu2_power4+iu2_power4,fpu2_power4)\
|(du3_power4+du4_power4,lsu2_power4+iu1_power4,fpu2_power4)")
-; |(du3_power4+du4_power4,nothing,lsu2_power4+iu1_power4,fpu2_power4)")
-(define_insn_reservation "power4-vecstore" 1
+(define_insn_reservation "power4-vecstore" 12
(and (eq_attr "type" "vecstore")
(eq_attr "cpu" "power4"))
"(du1_power4,lsu1_power4,vec_power4)\
|(du2_power4,lsu2_power4,vec_power4)\
- |(du3_power4,lsu2_power4,nothing,vec_power4)\
- |(du4_power4,lsu1_power4,nothing,vec_power4)")
+ |(du3_power4,lsu2_power4,vec_power4)\
+ |(du4_power4,lsu1_power4,vec_power4)")
+
+(define_insn_reservation "power4-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
; Integer latency is 2 cycles
@@ -183,6 +187,26 @@
(eq_attr "cpu" "power4"))
"iq_power4")
+(define_insn_reservation "power4-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4,iu1_power4,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4+du3_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4+du4_power4,\
+ iu2_power4,nothing,iu2_power4,nothing,iu1_power4)\
+ |(du3_power4+du4_power4+du1_power4,\
+ iu2_power4,nothing,iu1_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4+du2_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)")
+
(define_insn_reservation "power4-insert" 4
(and (eq_attr "type" "insert_word")
(eq_attr "cpu" "power4"))
@@ -200,7 +224,7 @@
(eq_attr "cpu" "power4"))
"(du1_power4+du2_power4,iu1_power4,iu2_power4)\
|(du2_power4+du3_power4,iu2_power4,iu2_power4)\
- |(du3_power4+du4_power4,nothing,iu2_power4,iu1_power4)")
+ |(du3_power4+du4_power4,iu2_power4,iu1_power4)")
(define_bypass 4 "power4-compare" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
@@ -210,7 +234,6 @@
"(du1_power4+du2_power4,iu1_power4*6,iu2_power4)\
|(du2_power4+du3_power4,iu2_power4*6,iu2_power4)\
|(du3_power4+du4_power4,iu2_power4*6,iu1_power4)")
-; |(du3_power4+du4_power4,nothing,iu2_power4*6,iu1_power4)")
(define_bypass 10 "power4-lmul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
@@ -220,7 +243,6 @@
"(du1_power4+du2_power4,iu1_power4*4,iu2_power4)\
|(du2_power4+du3_power4,iu2_power4*4,iu2_power4)\
|(du3_power4+du4_power4,iu2_power4*4,iu1_power4)")
-; |(du3_power4+du4_power4,nothing,iu2_power4*4,iu1_power4)")
(define_bypass 8 "power4-imul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
@@ -230,9 +252,7 @@
"(du1_power4,iu1_power4*6)\
|(du2_power4,iu2_power4*6)\
|(du3_power4,iu2_power4*6)\
- |(du4_power4,iu2_power4*6)")
-; |(du3_power4,nothing,iu2_power4*6)\
-; |(du4_power4,nothing,iu2_power4*6)")
+ |(du4_power4,iu1_power4*6)")
(define_insn_reservation "power4-imul" 5
(and (eq_attr "type" "imul")
@@ -241,8 +261,6 @@
|(du2_power4,iu2_power4*4)\
|(du3_power4,iu2_power4*4)\
|(du4_power4,iu1_power4*4)")
-; |(du3_power4,nothing,iu2_power4*4)\
-; |(du4_power4,nothing,iu1_power4*4)")
(define_insn_reservation "power4-imul3" 4
(and (eq_attr "type" "imul2,imul3")
@@ -251,8 +269,6 @@
|(du2_power4,iu2_power4*3)\
|(du3_power4,iu2_power4*3)\
|(du4_power4,iu1_power4*3)")
-; |(du3_power4,nothing,iu2_power4*3)\
-; |(du4_power4,nothing,iu1_power4*3)")
; SPR move only executes in first IU.
@@ -335,8 +351,6 @@
|(du2_power4,fpu2_power4*28)\
|(du3_power4,fpu2_power4*28)\
|(du4_power4,fpu1_power4*28)")
-; |(du3_power4,nothing,fpu2_power4*28)\
-; |(du4_power4,nothing,fpu1_power4*28)")
(define_insn_reservation "power4-sqrt" 40
(and (eq_attr "type" "ssqrt,dsqrt")
@@ -345,8 +359,12 @@
|(du2_power4,fpu2_power4*35)\
|(du3_power4,fpu2_power4*35)\
|(du4_power4,fpu2_power4*35)")
-; |(du3_power4,nothing,fpu2_power4*35)\
-; |(du4_power4,nothing,fpu2_power4*35)")
+
+(define_insn_reservation "power4-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
; VMX
diff --git a/contrib/gcc/config/rs6000/power5.md b/contrib/gcc/config/rs6000/power5.md
index 59baa79..ce68926 100644
--- a/contrib/gcc/config/rs6000/power5.md
+++ b/contrib/gcc/config/rs6000/power5.md
@@ -15,8 +15,8 @@
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
;; Sources: IBM Red Book and White Paper on POWER5
@@ -37,20 +37,20 @@
(define_reservation "lsq_power5"
"(du1_power5,lsu1_power5)\
|(du2_power5,lsu2_power5)\
- |(du3_power5,nothing,lsu2_power5)\
- |(du4_power5,nothing,lsu1_power5)")
+ |(du3_power5,lsu2_power5)\
+ |(du4_power5,lsu1_power5)")
(define_reservation "iq_power5"
"(du1_power5,iu1_power5)\
|(du2_power5,iu2_power5)\
- |(du3_power5,nothing,iu2_power5)\
- |(du4_power5,nothing,iu1_power5)")
+ |(du3_power5,iu2_power5)\
+ |(du4_power5,iu1_power5)")
(define_reservation "fpq_power5"
"(du1_power5,fpu1_power5)\
|(du2_power5,fpu2_power5)\
- |(du3_power5,nothing,fpu2_power5)\
- |(du4_power5,nothing,fpu1_power5)")
+ |(du3_power5,fpu2_power5)\
+ |(du4_power5,fpu1_power5)")
; Dispatch slots are allocated in order conforming to program order.
(absence_set "du1_power5" "du2_power5,du3_power5,du4_power5,du5_power5")
@@ -103,38 +103,44 @@
(eq_attr "cpu" "power5"))
"du1_power5+du2_power5,lsu1_power5+iu2_power5")
-(define_insn_reservation "power5-store" 1
+(define_insn_reservation "power5-store" 12
(and (eq_attr "type" "store")
(eq_attr "cpu" "power5"))
"(du1_power5,lsu1_power5,iu1_power5)\
|(du2_power5,lsu2_power5,iu2_power5)\
- |(du3_power5,lsu2_power5,nothing,iu2_power5)\
- |(du4_power5,lsu1_power5,nothing,iu1_power5)")
+ |(du3_power5,lsu2_power5,iu2_power5)\
+ |(du4_power5,lsu1_power5,iu1_power5)")
-(define_insn_reservation "power5-store-update" 1
+(define_insn_reservation "power5-store-update" 12
(and (eq_attr "type" "store_u")
(eq_attr "cpu" "power5"))
"du1_power5+du2_power5,lsu1_power5+iu2_power5,iu1_power5")
-(define_insn_reservation "power5-store-update-indexed" 1
+(define_insn_reservation "power5-store-update-indexed" 12
(and (eq_attr "type" "store_ux")
(eq_attr "cpu" "power5"))
"du1_power5+du2_power5+du3_power5+du4_power5,\
iu1_power5,lsu2_power5+iu2_power5,iu2_power5")
-(define_insn_reservation "power5-fpstore" 1
+(define_insn_reservation "power5-fpstore" 12
(and (eq_attr "type" "fpstore")
(eq_attr "cpu" "power5"))
"(du1_power5,lsu1_power5,fpu1_power5)\
|(du2_power5,lsu2_power5,fpu2_power5)\
- |(du3_power5,lsu2_power5,nothing,fpu2_power5)\
- |(du4_power5,lsu1_power5,nothing,fpu1_power5)")
+ |(du3_power5,lsu2_power5,fpu2_power5)\
+ |(du4_power5,lsu1_power5,fpu1_power5)")
-(define_insn_reservation "power5-fpstore-update" 1
+(define_insn_reservation "power5-fpstore-update" 12
(and (eq_attr "type" "fpstore_u,fpstore_ux")
(eq_attr "cpu" "power5"))
"du1_power5+du2_power5,lsu1_power5+iu2_power5,fpu1_power5")
+(define_insn_reservation "power5-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
+
; Integer latency is 2 cycles
(define_insn_reservation "power5-integer" 2
@@ -142,6 +148,26 @@
(eq_attr "cpu" "power5"))
"iq_power5")
+(define_insn_reservation "power5-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5,iu1_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5,iu2_power5,nothing,iu2_power5)\
+ |(du3_power5+du4_power5,iu2_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5,iu1_power5,nothing,iu1_power5)")
+
+(define_insn_reservation "power5-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5+du3_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5+du4_power5,\
+ iu2_power5,nothing,iu2_power5,nothing,iu1_power5)\
+ |(du3_power5+du4_power5+du1_power5,\
+ iu2_power5,nothing,iu1_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5+du2_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)")
+
(define_insn_reservation "power5-insert" 4
(and (eq_attr "type" "insert_word")
(eq_attr "cpu" "power5"))
@@ -179,9 +205,7 @@
"(du1_power5,iu1_power5*6)\
|(du2_power5,iu2_power5*6)\
|(du3_power5,iu2_power5*6)\
- |(du4_power5,iu2_power5*6)")
-; |(du3_power5,nothing,iu2_power5*6)\
-; |(du4_power5,nothing,iu2_power5*6)")
+ |(du4_power5,iu1_power5*6)")
(define_insn_reservation "power5-imul" 5
(and (eq_attr "type" "imul")
@@ -190,8 +214,6 @@
|(du2_power5,iu2_power5*4)\
|(du3_power5,iu2_power5*4)\
|(du4_power5,iu1_power5*4)")
-; |(du3_power5,nothing,iu2_power5*4)\
-; |(du4_power5,nothing,iu1_power5*4)")
(define_insn_reservation "power5-imul3" 4
(and (eq_attr "type" "imul2,imul3")
@@ -200,8 +222,6 @@
|(du2_power5,iu2_power5*3)\
|(du3_power5,iu2_power5*3)\
|(du4_power5,iu1_power5*3)")
-; |(du3_power5,nothing,iu2_power5*3)\
-; |(du4_power5,nothing,iu1_power5*3)")
; SPR move only executes in first IU.
@@ -284,8 +304,6 @@
|(du2_power5,fpu2_power5*28)\
|(du3_power5,fpu2_power5*28)\
|(du4_power5,fpu1_power5*28)")
-; |(du3_power5,nothing,fpu2_power5*28)\
-; |(du4_power5,nothing,fpu1_power5*28)")
(define_insn_reservation "power5-sqrt" 40
(and (eq_attr "type" "ssqrt,dsqrt")
@@ -294,6 +312,10 @@
|(du2_power5,fpu2_power5*35)\
|(du3_power5,fpu2_power5*35)\
|(du4_power5,fpu2_power5*35)")
-; |(du3_power5,nothing,fpu2_power5*35)\
-; |(du4_power5,nothing,fpu2_power5*35)")
+
+(define_insn_reservation "power5-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
diff --git a/contrib/gcc/config/rs6000/ppc64-fp.c b/contrib/gcc/config/rs6000/ppc64-fp.c
index c736d9a..184f34e 100644
--- a/contrib/gcc/config/rs6000/ppc64-fp.c
+++ b/contrib/gcc/config/rs6000/ppc64-fp.c
@@ -2,7 +2,7 @@
libgcc2.c with macros expanded to force the use of specific types.
Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of GCC.
@@ -27,10 +27,11 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined (__64BIT__) || defined(__ppc64__)
+#define TMODES
#include "config/fp-bit.h"
extern DItype __fixtfdi (TFtype);
@@ -39,8 +40,11 @@ extern DItype __fixsfdi (SFtype);
extern USItype __fixunsdfsi (DFtype);
extern USItype __fixunssfsi (SFtype);
extern TFtype __floatditf (DItype);
+extern TFtype __floatunditf (UDItype);
extern DFtype __floatdidf (DItype);
+extern DFtype __floatundidf (UDItype);
extern SFtype __floatdisf (DItype);
+extern SFtype __floatundisf (UDItype);
extern DItype __fixunstfdi (TFtype);
static DItype local_fixunssfdi (SFtype);
@@ -100,6 +104,18 @@ __floatditf (DItype u)
return (TFtype) dh + (TFtype) dl;
}
+TFtype
+__floatunditf (UDItype u)
+{
+ DFtype dh, dl;
+
+ dh = (USItype) (u >> (sizeof (SItype) * 8));
+ dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (TFtype) dh + (TFtype) dl;
+}
+
DFtype
__floatdidf (DItype u)
{
@@ -112,6 +128,18 @@ __floatdidf (DItype u)
return d;
}
+DFtype
+__floatundidf (UDItype u)
+{
+ DFtype d;
+
+ d = (USItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
SFtype
__floatdisf (DItype u)
{
@@ -137,6 +165,30 @@ __floatdisf (DItype u)
return (SFtype) f;
}
+SFtype
+__floatundisf (UDItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (u >= ((UDItype) 1 << 53))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (USItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
DItype
__fixunstfdi (TFtype a)
{
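
The __floatund* helpers added above all use the same trick: split the unsigned 64-bit input into 32-bit halves, each exactly representable in double, and compute hi * 2^32 + lo, building the scale factor as 2.0 * (1 << 31) so no 32-bit shift overflows. A worked sketch of the split:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned long long u = 0x123456789ABCDEF0ULL;
    unsigned int hi = (unsigned int) (u >> 32);
    unsigned int lo = (unsigned int) (u & 0xffffffffULL);
    /* Same arithmetic as __floatundidf: hi * 2^32 + lo.  */
    double d = (double) hi * (2.0 * (1u << 31)) + (double) lo;
    printf ("%llu -> %.17g\n", u, d);
    return 0;
  }
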
diff --git a/contrib/gcc/config/rs6000/predicates.md b/contrib/gcc/config/rs6000/predicates.md
new file mode 100644
index 0000000..6aefe2d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/predicates.md
@@ -0,0 +1,1307 @@
+;; Predicate definitions for POWER and PowerPC.
+;; Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Return 1 for anything except PARALLEL.
+(define_predicate "any_operand"
+ (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem"))
+
+;; Return 1 for any PARALLEL.
+(define_predicate "any_parallel_operand"
+ (match_code "parallel"))
+
+;; Return 1 if op is the COUNT register.
+(define_predicate "count_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == COUNT_REGISTER_REGNUM
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is an Altivec register.
+(define_predicate "altivec_register_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || ALTIVEC_REGNO_P (REGNO (op))
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is the XER register.
+(define_predicate "xer_operand"
+ (and (match_code "reg")
+ (match_test "XER_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a signed 5-bit constant integer.
+(define_predicate "s5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -16 && INTVAL (op) <= 15")))
+
+;; Return 1 if op is an unsigned 5-bit constant integer.
+(define_predicate "u5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 31")))
+
+;; Return 1 if op is a signed 8-bit constant integer.
+;; Integer multiplication completes more quickly with such operands.
+(define_predicate "s8bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -128 && INTVAL (op) <= 127")))
+
+;; Return 1 if op is a constant integer that can fit in a D field.
+(define_predicate "short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)")))
+
+;; Return 1 if op is a constant integer that can fit in an unsigned D field.
+(define_predicate "u_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_K (op)")))
+
+;; Return 1 if op is a constant integer that cannot fit in a signed D field.
+(define_predicate "non_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT)
+ (INTVAL (op) + 0x8000) >= 0x10000")))
+
+;; Return 1 if op is a positive constant integer that is an exact power of 2.
+(define_predicate "exact_log2_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) > 0 && exact_log2 (INTVAL (op)) >= 0")))
+
+;; Return 1 if op is a register that is not special.
+(define_predicate "gpc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "(GET_CODE (op) != REG
+ || (REGNO (op) >= ARG_POINTER_REGNUM
+ && !XER_REGNO_P (REGNO (op)))
+ || REGNO (op) < MQ_REGNO)
+ && !((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (op, mode))")))
+
+;; Return 1 if op is a register that is a condition register field.
+(define_predicate "cc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a register that is a condition register field not cr0.
+(define_predicate "cc_reg_not_cr0_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_NOT_CR0_P (REGNO (op))")))
+
+;; Return 1 if op is a constant integer valid for D field
+;; or non-special register.
+(define_predicate "reg_or_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose negation is valid for
+;; the D field or non-special register.
+;; Do not allow a constant zero because all patterns that call this
+;; predicate use "addic r1,r2,-const" to set carry when r2 is greater than
+;; or equal to const, which does not work for zero.
+(define_predicate "reg_or_neg_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_P (op)
+ && INTVAL (op) != 0")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for DS field
+;; or non-special register.
+(define_predicate "reg_or_aligned_short_operand"
+ (if_then_else (match_code "const_int")
+ (and (match_operand 0 "short_cint_operand")
+ (match_test "!(INTVAL (op) & 3)"))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose high-order 16 bits are zero
+;; or non-special register.
+(define_predicate "reg_or_u_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "u_short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any constant integer
+;; or non-special register.
+(define_predicate "reg_or_cint_operand"
+ (ior (match_code "const_int")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for addition
+;; or non-special register.
+(define_predicate "reg_or_add_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000)
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for subtraction
+;; or non-special register.
+(define_predicate "reg_or_sub_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || - INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (- INTVAL (op)
+ + (mode == SImode
+ ? 0x80000000 : 0x80008000))
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any 32-bit unsigned constant integer
+;; or non-special register.
+(define_predicate "reg_or_logical_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && INTVAL (op) >= 0)
+ || ((INTVAL (op) & GET_MODE_MASK (mode)
+ & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0)")
+ (if_then_else (match_code "const_double")
+ (match_test "GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && mode == DImode
+ && CONST_DOUBLE_HIGH (op) == 0")
+ (match_operand 0 "gpc_reg_operand"))))
+
+;; Return 1 if operand is a CONST_DOUBLE that can be set in a register
+;; with no more than one instruction per word.
+(define_predicate "easy_fp_constant"
+ (match_code "const_double")
+{
+ long k[4];
+ REAL_VALUE_TYPE rv;
+
+ if (GET_MODE (op) != mode
+ || (!SCALAR_FLOAT_MODE_P (mode) && mode != DImode))
+ return 0;
+
+ /* Consider all constants with -msoft-float to be easy. */
+ if ((TARGET_SOFT_FLOAT || TARGET_E500_SINGLE)
+ && mode != DImode)
+ return 1;
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return 0;
+
+ /* If we are using V.4 style PIC, consider all constants to be hard. */
+ if (flag_pic && DEFAULT_ABI == ABI_V4)
+ return 0;
+
+#ifdef TARGET_RELOCATABLE
+ /* Similarly if we are using -mrelocatable, consider all constants
+ to be hard. */
+ if (TARGET_RELOCATABLE)
+ return 0;
+#endif
+
+ switch (mode)
+ {
+ case TFmode:
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[2]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1);
+
+ case DFmode:
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (TARGET_E500_DOUBLE
+ || (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations))
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1);
+
+ case SFmode:
+ /* The constant 0.f is easy. */
+ if (op == CONST0_RTX (SFmode))
+ return 1;
+
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations)
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, k[0]);
+
+ return num_insns_constant_wide (k[0]) == 1;
+
+ case DImode:
+ return ((TARGET_POWERPC64
+ && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0)
+ || (num_insns_constant (op, DImode) <= 2));
+
+ case SImode:
+ return 1;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+;; Return 1 if the operand is a CONST_VECTOR and can be loaded into a
+;; vector register without using memory.
+(define_predicate "easy_vector_constant"
+ (match_code "const_vector")
+{
+ if (ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (zero_constant (op, mode))
+ return true;
+ return easy_altivec_constant (op, mode);
+ }
+
+ if (SPE_VECTOR_MODE (mode))
+ {
+ int cst, cst2;
+ if (zero_constant (op, mode))
+ return true;
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
+ return false;
+
+ /* Limit SPE vectors to 15 bits signed. These we can generate with:
+ li r0, CONSTANT1
+ evmergelo r0, r0, r0
+ li r0, CONSTANT2
+
+ I don't know how efficient it would be to allow bigger constants,
+ considering we'll have an extra 'ori' for every 'li'. I doubt 5
+ instructions is better than a 64-bit memory load, but I don't
+ have the e500 timing specs. */
+ if (mode == V2SImode)
+ {
+ cst = INTVAL (CONST_VECTOR_ELT (op, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (op, 1));
+ return cst >= -0x7fff && cst <= 0x7fff
+ && cst2 >= -0x7fff && cst2 <= 0x7fff;
+ }
+ }
+
+ return false;
+})
+
+;; Same as easy_vector_constant but only for EASY_VECTOR_15_ADD_SELF.
+(define_predicate "easy_vector_constant_add_self"
+ (and (match_code "const_vector")
+ (and (match_test "TARGET_ALTIVEC")
+ (match_test "easy_altivec_constant (op, mode)")))
+{
+ rtx last = CONST_VECTOR_ELT (op, GET_MODE_NUNITS (mode) - 1);
+ HOST_WIDE_INT val = ((INTVAL (last) & 0xff) ^ 0x80) - 0x80;
+ return EASY_VECTOR_15_ADD_SELF (val);
+})
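
The ((INTVAL (last) & 0xff) ^ 0x80) - 0x80 expression sign-extends the low byte of the last vector element before the EASY_VECTOR_15_ADD_SELF test. The xor/subtract idiom in isolation (illustrative only):

#include <assert.h>

/* Sign-extend the low 8 bits of X using the xor/subtract idiom
   from the predicate above.  */
static long
sext8 (long x)
{
  return ((x & 0xff) ^ 0x80) - 0x80;
}

int
main (void)
{
  assert (sext8 (0x7f) == 127);
  assert (sext8 (0x80) == -128);
  assert (sext8 (0x1ff) == -1);   /* only the low byte matters */
  return 0;
}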
+
+;; Return 1 if operand is constant zero (scalars and vectors).
+(define_predicate "zero_constant"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Return 1 if operand is 0.0.
+(define_predicate "zero_fp_constant"
+ (and (match_code "const_double")
+ (match_test "SCALAR_FLOAT_MODE_P (mode)
+ && op == CONST0_RTX (mode)")))
+
+;; Return 1 if the operand is in volatile memory. Note that during the
+;; RTL generation phase, memory_operand does not return TRUE for volatile
+;; memory references. So this function allows us to recognize volatile
+;; references where it's safe.
+(define_predicate "volatile_mem_operand"
+ (and (and (match_code "mem")
+ (match_test "MEM_VOLATILE_P (op)"))
+ (if_then_else (match_test "reload_completed")
+ (match_operand 0 "memory_operand")
+ (if_then_else (match_test "reload_in_progress")
+ (match_test "strict_memory_address_p (mode, XEXP (op, 0))")
+ (match_test "memory_address_p (mode, XEXP (op, 0))")))))
+
+;; Return 1 if the operand is an offsettable memory operand.
+(define_predicate "offsettable_mem_operand"
+ (and (match_code "mem")
+ (match_test "offsettable_address_p (reload_completed
+ || reload_in_progress,
+ mode, XEXP (op, 0))")))
+
+;; Return 1 if the operand is a memory operand with an address divisible by 4.
+(define_predicate "word_offset_memref_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_test "GET_CODE (XEXP (op, 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (op, 0), 0))
+ || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (op, 0), 1)) % 4 == 0")))
+
+;; Return 1 if the operand is an indexed or indirect memory operand.
+(define_predicate "indexed_or_indirect_operand"
+ (match_code "mem")
+{
+ op = XEXP (op, 0);
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (op) == AND
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == -16)
+ op = XEXP (op, 0);
+
+ return indexed_or_indirect_address (op, mode);
+})
+
+;; Return 1 if the operand is an indexed or indirect address.
+(define_special_predicate "indexed_or_indirect_address"
+ (and (match_test "REG_P (op)
+ || (GET_CODE (op) == PLUS
+ /* Omit testing REG_P (XEXP (op, 0)). */
+ && REG_P (XEXP (op, 1)))")
+ (match_operand 0 "address_operand")))
+
+;; Used for the destination of the fix_truncdfsi2 expander.
+;; If stfiwx will be used, the result goes to memory; otherwise,
+;; we're going to emit a store and a load of a subreg, so the dest is a
+;; register.
+(define_predicate "fix_trunc_dest_operand"
+ (if_then_else (match_test "! TARGET_E500_DOUBLE && TARGET_PPC_GFXOPT")
+ (match_operand 0 "memory_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is either a non-special register or can be used
+;; as the operand of a `mode' add insn.
+(define_predicate "add_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)
+ || satisfies_constraint_L (op)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if OP is a constant but not a valid add_operand.
+(define_predicate "non_add_cint_operand"
+ (and (match_code "const_int")
+ (match_test "!satisfies_constraint_I (op)
+ && !satisfies_constraint_L (op)")))
+
+;; Return 1 if the operand is a constant that can be used as the operand
+;; of an OR or XOR.
+(define_predicate "logical_const_operand"
+ (match_code "const_int,const_double")
+{
+ HOST_WIDE_INT opl, oph;
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ opl = INTVAL (op) & GET_MODE_MASK (mode);
+
+ if (HOST_BITS_PER_WIDE_INT <= 32
+ && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0)
+ return 0;
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT);
+
+ opl = CONST_DOUBLE_LOW (op);
+ oph = CONST_DOUBLE_HIGH (op);
+ if (oph != 0)
+ return 0;
+ }
+ else
+ return 0;
+
+ return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0
+ || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0);
+})
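
The final return accepts a constant whose set bits lie entirely in the low halfword or entirely in the high halfword of a 32-bit value, i.e. anything a single ori/xori or oris/xoris immediate can supply. A standalone sketch of that acceptance test (not the compiler's code):

#include <assert.h>
#include <stdint.h>

/* Accept constants that one ori/xori (low halfword) or one
   oris/xoris (high halfword) can supply, as in the return above.  */
static int
logical_const_ok (uint64_t opl)
{
  return (opl & ~(uint64_t) 0xffff) == 0
         || (opl & ~(uint64_t) 0xffff0000) == 0;
}

int
main (void)
{
  assert (logical_const_ok (0x1234));       /* ori  */
  assert (logical_const_ok (0x12340000));   /* oris */
  assert (!logical_const_ok (0x12340001));  /* needs both halves */
  return 0;
}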
+
+;; Return 1 if the operand is a non-special register or a constant that
+;; can be used as the operand of an OR or XOR.
+(define_predicate "logical_operand"
+ (ior (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_const_operand")))
+
+;; Return 1 if op is a constant that is not a logical operand, but could
+;; be split into one.
+(define_predicate "non_logical_cint_operand"
+ (and (match_code "const_int,const_double")
+ (and (not (match_operand 0 "logical_operand"))
+ (match_operand 0 "reg_or_logical_cint_operand"))))
+
+;; Return 1 if op is a constant that can be encoded in a 32-bit mask,
+;; suitable for use with rlwinm (no more than two 1->0 or 0->1
+;; transitions). Reject all ones and all zeros, since these should have
+;; been optimized away and confuse the making of MB and ME.
+(define_predicate "mask_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if (TARGET_POWERPC64)
+ {
+ /* Fail if the mask is not 32-bit. */
+ if (mode == DImode && (c & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0)
+ return 0;
+
+ /* Fail if the mask wraps around because the upper 32 bits of the
+ mask will all be 1s, contrary to GCC's internal view. */
+ if ((c & 0x80000001) == 0x80000001)
+ return 0;
+ }
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Reject all zeros or all ones. */
+ if (c == 0)
+ return 0;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
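
c & -c isolates the lowest set bit, so each invert-and-erase round in the body above consumes exactly one 0/1 boundary; mask_operand then accepts masks with at most two such transitions (one contiguous run of ones), rejecting 0 and ~0 separately. A worked standalone sketch of the counting (illustrative only):

#include <assert.h>
#include <stdint.h>

/* Count 0<->1 transitions in C, scanning from the LSB, using the
   same invert/erase steps as mask_operand above.  */
static int
transitions (int64_t c)
{
  int n = 0;
  if (c & 1)
    c = ~c;                    /* normalize so the LSB is zero */
  while (c != 0)
    {
      int64_t lsb = c & -c;    /* isolate the lowest set bit */
      n++;                     /* that bit marks one transition */
      c = ~c;                  /* invert to look for the next one */
      c &= -lsb;               /* clear the bits below it */
    }
  return n;
}

int
main (void)
{
  assert (transitions (0x0ff0) == 2);   /* one run of ones: accepted */
  assert (transitions (0x0f0f) == 3);   /* two runs: rejected */
  return 0;
}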
+
+;; Return 1 for the PowerPC64 rlwinm corner case.
+(define_predicate "mask_operand_wrap"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if ((c & 0x80000001) != 0x80000001)
+ return 0;
+
+ c = ~c;
+ if (c == 0)
+ return 0;
+
+ lsb = c & -c;
+ c = ~c;
+ c &= -lsb;
+ lsb = c & -c;
+ return c == -lsb;
+})
+
+;; Return 1 if the operand is a constant that is a PowerPC64 mask
+;; suitable for use with rldicl or rldicr (no more than one 1->0 or 0->1
+;; transition). Reject all zeros, since zero should have been
+;; optimized away and confuses the making of MB and ME.
+(define_predicate "mask64_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Reject all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
+
+;; Like mask64_operand, but allow up to three transitions. This
+;; predicate is used by insn patterns that generate two rldicl or
+;; rldicr machine insns.
+(define_predicate "mask64_2_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Disallow all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a third transition. */
+ c = ~c;
+
+ /* Erase second transition. */
+ c &= -lsb;
+
+ /* Find the third transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
+
+;; Like and_operand, but also match constants that can be implemented
+;; with two rldicl or rldicr insns.
+(define_predicate "and64_2_operand"
+ (ior (match_operand 0 "mask64_2_operand")
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand"))))
+
+;; Return 1 if the operand is either a non-special register or a
+;; constant that can be used as the operand of a logical AND.
+(define_predicate "and_operand"
+ (ior (match_operand 0 "mask_operand")
+ (ior (and (match_test "TARGET_POWERPC64 && mode == DImode")
+ (match_operand 0 "mask64_operand"))
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand")))))
+
+;; Return 1 if the operand is either a logical operand or a short cint operand.
+(define_predicate "scc_eq_operand"
+ (ior (match_operand 0 "logical_operand")
+ (match_operand 0 "short_cint_operand")))
+
+;; Return 1 if the operand is a general non-special register or memory operand.
+(define_predicate "reg_or_mem_operand"
+ (ior (match_operand 0 "memory_operand")
+ (ior (and (match_code "mem")
+ (match_test "macho_lo_sum_memory_operand (op, mode)"))
+ (ior (match_operand 0 "volatile_mem_operand")
+ (match_operand 0 "gpc_reg_operand")))))
+
+;; Return 1 if the operand is a non-special register, or a memory operand
+;; (rejecting all memory when targeting E500 double).
+(define_predicate "reg_or_none500mem_operand"
+ (if_then_else (match_code "mem")
+ (and (match_test "!TARGET_E500_DOUBLE")
+ (ior (match_operand 0 "memory_operand")
+ (ior (match_test "macho_lo_sum_memory_operand (op, mode)")
+ (match_operand 0 "volatile_mem_operand"))))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is CONST_DOUBLE 0, register or memory operand.
+(define_predicate "zero_reg_mem_operand"
+ (ior (match_operand 0 "zero_fp_constant")
+ (match_operand 0 "reg_or_mem_operand")))
+
+;; Return 1 if the operand is a general register, or a memory operand
+;; without pre_inc or pre_dec (which would produce an invalid form of
+;; the PowerPC lwa instruction).
+(define_predicate "lwa_operand"
+ (match_code "reg,subreg,mem")
+{
+ rtx inner = op;
+
+ if (reload_completed && GET_CODE (inner) == SUBREG)
+ inner = SUBREG_REG (inner);
+
+ return gpc_reg_operand (inner, mode)
+ || (memory_operand (inner, mode)
+ && GET_CODE (XEXP (inner, 0)) != PRE_INC
+ && GET_CODE (XEXP (inner, 0)) != PRE_DEC
+ && (GET_CODE (XEXP (inner, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (inner, 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (inner, 0), 1)) % 4 == 0));
+})
+
+;; Return 1 if the operand, used inside a MEM, is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(mode == VOIDmode || GET_MODE (op) == mode)
+ && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))")))
+
+;; Return 1 if op is an operand that can be loaded via the GOT.
+(define_predicate "got_operand"
+ (match_code "symbol_ref,const,label_ref"))
+
+;; Return 1 if op is a simple reference that can be loaded via the GOT,
+;; excluding labels involving addition.
+(define_predicate "got_no_const_operand"
+ (match_code "symbol_ref,label_ref"))
+
+;; Return 1 if op is a SYMBOL_REF for a TLS symbol.
+(define_predicate "rs6000_tls_symbol_ref"
+ (and (match_code "symbol_ref")
+ (match_test "RS6000_SYMBOL_REF_TLS_P (op)")))
+
+;; Return 1 if the operand, used inside a MEM, is a valid first argument
+;; to CALL. This is a SYMBOL_REF, a pseudo-register, LR or CTR.
+(define_predicate "call_operand"
+ (if_then_else (match_code "reg")
+ (match_test "REGNO (op) == LINK_REGISTER_REGNUM
+ || REGNO (op) == COUNT_REGISTER_REGNUM
+ || REGNO (op) >= FIRST_PSEUDO_REGISTER")
+ (match_code "symbol_ref")))
+
+;; Return 1 if the operand is a SYMBOL_REF for a function known to be in
+;; this file.
+(define_predicate "current_file_function_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))
+ && (SYMBOL_REF_LOCAL_P (op)
+ || (op == XEXP (DECL_RTL (current_function_decl),
+ 0)))")))
+
+;; Return 1 if this operand is a valid input for a move insn.
+(define_predicate "input_operand"
+ (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem,
+ const_double,const_vector,const_int,plus")
+{
+ /* Memory is always valid. */
+ if (memory_operand (op, mode))
+ return 1;
+
+ /* For floating-point, easy constants are valid. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && CONSTANT_P (op)
+ && easy_fp_constant (op, mode))
+ return 1;
+
+ /* Allow any integer constant. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && (GET_CODE (op) == CONST_INT
+ || GET_CODE (op) == CONST_DOUBLE))
+ return 1;
+
+ /* Allow easy vector constants. */
+ if (GET_CODE (op) == CONST_VECTOR
+ && easy_vector_constant (op, mode))
+ return 1;
+
+ /* Do not allow invalid E500 subregs. */
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ /* For floating-point or multi-word mode, the only remaining valid type
+ is a register. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ || GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return register_operand (op, mode);
+
+ /* The only cases left are integral modes one word or smaller (we
+ do not get called for MODE_CC values). These can be in any
+ register. */
+ if (register_operand (op, mode))
+ return 1;
+
+ /* A SYMBOL_REF referring to the TOC is valid. */
+ if (legitimate_constant_pool_address_p (op))
+ return 1;
+
+ /* A constant pool expression (relative to the TOC) is valid. */
+ if (toc_relative_expr_p (op))
+ return 1;
+
+ /* V.4 allows SYMBOL_REFs and CONSTs that are in the small data region
+ to be valid. */
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)
+ && small_data_operand (op, Pmode))
+ return 1;
+
+ return 0;
+})
+
+;; Return true if OP is a nonimmediate operand, rejecting SUBREGs that
+;; are invalid on the e500.
+(define_predicate "rs6000_nonimmediate_operand"
+ (match_code "reg,subreg,mem")
+{
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ return nonimmediate_operand (op, mode);
+})
+
+;; Return true if operand is boolean operator.
+(define_predicate "boolean_operator"
+ (match_code "and,ior,xor"))
+
+;; Return true if operand is OR-form of boolean operator.
+(define_predicate "boolean_or_operator"
+ (match_code "ior,xor"))
+
+;; Return true if operand is an equality operator.
+(define_special_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+;; Return true if operand is MIN or MAX operator.
+(define_predicate "min_max_operator"
+ (match_code "smin,smax,umin,umax"))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; instruction. We check the opcode against the mode of the CC value.
+;; validate_condition_mode is an assertion.
+(define_predicate "branch_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (and (match_test "GET_MODE_CLASS (GET_MODE (XEXP (op, 0))) == MODE_CC")
+ (match_test "validate_condition_mode (GET_CODE (op),
+ GET_MODE (XEXP (op, 0))),
+ 1"))))
+
+;; Return 1 if OP is a comparison operation that is valid for an SCC insn --
+;; it must be a positive comparison.
+(define_predicate "scc_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; insn, which is true if the corresponding bit in the CC register is set.
+(define_predicate "branch_positive_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a trap insn.
+(define_predicate "trap_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (match_code "eq,ne,le,lt,ge,gt,leu,ltu,geu,gtu")))
+
+;; Return 1 if OP is a load multiple operation, known to be a PARALLEL.
+(define_predicate "load_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is a store multiple operation, known to be a PARALLEL.
+;; The second vector element is a CLOBBER.
+(define_predicate "store_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0) - 1;
+ unsigned int src_regno;
+ rtx dest_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i + 1);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for a save_world call in the prologue, known to
+;; be a PARALLEL.
+(define_predicate "save_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 55)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), DFmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != DFmode)
+ return 0;
+ }
+
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != V4SImode)
+ return 0;
+ }
+
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+ }
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || REGNO (SET_SRC (elt)) != CR2_REGNO
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a restore_world call in the epilogue, known
+;; to be a PARALLEL.
+(define_predicate "restore_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 59)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != RETURN
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) != CR2_REGNO
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+ }
+
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != V4SImode)
+ return 0;
+ }
+
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), DFmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != DFmode)
+ return 0;
+ }
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a vrsave call, known to be a PARALLEL.
+(define_predicate "vrsave_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno, src_regno;
+ int i;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE
+ || XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPECV_SET_VRSAVE)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_regno = REGNO (XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 1));
+
+ if (dest_regno != VRSAVE_REGNO || src_regno != VRSAVE_REGNO)
+ return 0;
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != CLOBBER
+ && GET_CODE (elt) != SET)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for mfcr insn, known to be a PARALLEL.
+(define_predicate "mfcr_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+ rtx src_reg;
+
+ src_reg = XVECEXP (SET_SRC (exp), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != CCmode
+ || ! CR_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != SImode
+ || ! INT_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_FROM_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
+
+;; Return 1 if OP is valid for mtcrf insn, known to be a PARALLEL.
+(define_predicate "mtcrf_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+ rtx src_reg;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+ src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != SImode
+ || ! INT_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != CCmode
+ || ! CR_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
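
Both mfcr_operation and mtcrf_operation expect the mask for a condition-register field at bit position MAX_CR_REGNO - REGNO, which matches the FXM field encoding where CR0 occupies the most significant of the eight mask bits. A tiny sketch of that mapping, assuming fields simply numbered 0..7 in place of the REGNO arithmetic:

#include <assert.h>

/* FXM-style mask bit for condition-register field CRF (0..7):
   CR0 maps to 0x80, CR7 to 0x01.  */
static int
crf_maskval (int crf)
{
  return 1 << (7 - crf);
}

int
main (void)
{
  assert (crf_maskval (0) == 0x80);
  assert (crf_maskval (7) == 0x01);
  return 0;
}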
+
+;; Return 1 if OP is valid for lmw insn, known to be a PARALLEL.
+(define_predicate "lmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ if (dest_regno > 31
+ || count != 32 - (int) dest_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (src_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (src_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, src_addr, 0))
+ {
+ offset = INTVAL (XEXP (src_addr, 1));
+ base_regno = REGNO (XEXP (src_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_SRC (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for stmw insn, known to be a PARALLEL.
+(define_predicate "stmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx dest_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ if (src_regno > 31
+ || count != 32 - (int) src_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (dest_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (dest_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, dest_addr, 0))
+ {
+ offset = INTVAL (XEXP (dest_addr, 1));
+ base_regno = REGNO (XEXP (dest_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_DEST (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
diff --git a/contrib/gcc/config/rs6000/rios1.md b/contrib/gcc/config/rs6000/rios1.md
index 5e77a67..59b34c5 100644
--- a/contrib/gcc/config/rs6000/rios1.md
+++ b/contrib/gcc/config/rs6000/rios1.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM POWER processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,8 +15,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "rios1,rios1fp")
(define_cpu_unit "iu_rios1" "rios1")
@@ -26,11 +26,12 @@
;; RIOS1 32-bit IU, FPU, BPU
(define_insn_reservation "rios1-load" 2
- (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
(eq_attr "cpu" "rios1,ppc601"))
"iu_rios1")
-(define_insn_reservation "rios1-store" 1
+(define_insn_reservation "rios1-store" 2
(and (eq_attr "type" "store,store_ux,store_u")
(eq_attr "cpu" "rios1,ppc601"))
"iu_rios1")
@@ -45,7 +46,7 @@
(eq_attr "cpu" "ppc601"))
"iu_rios1")
-(define_insn_reservation "rios1-fpstore" 1
+(define_insn_reservation "rios1-fpstore" 3
(and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
(eq_attr "cpu" "rios1,ppc601"))
"iu_rios1+fpu_rios1")
@@ -55,6 +56,16 @@
(eq_attr "cpu" "rios1,ppc601"))
"iu_rios1")
+(define_insn_reservation "rios1-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1")
+
+(define_insn_reservation "rios1-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1,iu_rios1")
+
(define_insn_reservation "rios1-imul" 5
(and (eq_attr "type" "imul,imul_compare")
(eq_attr "cpu" "rios1"))
@@ -173,7 +184,7 @@
"iu_rios1,bpu_rios1")
(define_insn_reservation "rios1-branch" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "rios1,ppc601"))
"bpu_rios1")
diff --git a/contrib/gcc/config/rs6000/rios2.md b/contrib/gcc/config/rs6000/rios2.md
index 36690ac..b2f5cb2 100644
--- a/contrib/gcc/config/rs6000/rios2.md
+++ b/contrib/gcc/config/rs6000/rios2.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM Power2 processor.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,8 +15,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "rios2,rios2fp")
(define_cpu_unit "iu1_rios2,iu2_rios2" "rios2")
@@ -29,11 +29,12 @@
(define_insn_reservation "rios2-load" 2
(and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
- load_ux,load_u,fpload,fpload_ux,fpload_u")
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ load_l,store_c,sync")
(eq_attr "cpu" "rios2"))
"iu1_rios2|iu2_rios2")
-(define_insn_reservation "rios2-store" 1
+(define_insn_reservation "rios2-store" 2
(and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
(eq_attr "cpu" "rios2"))
"iu1_rios2|iu2_rios2")
@@ -43,6 +44,16 @@
(eq_attr "cpu" "rios2"))
"iu1_rios2|iu2_rios2")
+(define_insn_reservation "rios2-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
(define_insn_reservation "rios2-imul" 2
(and (eq_attr "type" "imul,imul2,imul3,imul_compare")
(eq_attr "cpu" "rios2"))
@@ -111,7 +122,7 @@
"iu1_rios2,bpu_rios2")
(define_insn_reservation "rios2-branch" 1
- (and (eq_attr "type" "jmpreg,branch")
+ (and (eq_attr "type" "jmpreg,branch,isync")
(eq_attr "cpu" "rios2"))
"bpu_rios2")
diff --git a/contrib/gcc/config/rs6000/rs6000-c.c b/contrib/gcc/config/rs6000/rs6000-c.c
index 13d0ca6..5c6abd10 100644
--- a/contrib/gcc/config/rs6000/rs6000-c.c
+++ b/contrib/gcc/config/rs6000/rs6000-c.c
@@ -1,8 +1,9 @@
/* Subroutines for the C front end on the POWER and PowerPC architectures.
- Copyright (C) 2002, 2003
+ Copyright (C) 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
Contributed by Zack Weinberg <zack@codesourcery.com>
+ and Paolo Bonzini <bonzini@gnu.org>
This file is part of GCC.
@@ -18,8 +19,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#include "config.h"
#include "system.h"
@@ -27,9 +28,15 @@
#include "tm.h"
#include "cpplib.h"
#include "tree.h"
+#include "c-common.h"
#include "c-pragma.h"
-#include "errors.h"
+#include "c-tree.h"
+#include "toplev.h"
#include "tm_p.h"
+#include "target.h"
+#include "langhooks.h"
+
+
/* Handle the machine specific pragma longcall. Its syntax is
@@ -41,10 +48,10 @@
whether or not new function declarations receive a longcall
attribute by default. */
-#define SYNTAX_ERROR(msgid) do { \
- warning (msgid); \
- warning ("ignoring malformed #pragma longcall"); \
- return; \
+#define SYNTAX_ERROR(gmsgid) do { \
+ warning (OPT_Wpragmas, gmsgid); \
+ warning (OPT_Wpragmas, "ignoring malformed #pragma longcall"); \
+ return; \
} while (0)
void
@@ -55,20 +62,20 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
/* If we get here, generic code has already scanned the directive
leader and the word "longcall". */
- if (c_lex (&x) != CPP_OPEN_PAREN)
+ if (pragma_lex (&x) != CPP_OPEN_PAREN)
SYNTAX_ERROR ("missing open paren");
- if (c_lex (&n) != CPP_NUMBER)
+ if (pragma_lex (&n) != CPP_NUMBER)
SYNTAX_ERROR ("missing number");
- if (c_lex (&x) != CPP_CLOSE_PAREN)
+ if (pragma_lex (&x) != CPP_CLOSE_PAREN)
SYNTAX_ERROR ("missing close paren");
- if (!integer_zerop (n) && !integer_onep (n))
+ if (n != integer_zero_node && n != integer_one_node)
SYNTAX_ERROR ("number must be 0 or 1");
- if (c_lex (&x) != CPP_EOF)
- warning ("junk at end of #pragma longcall");
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma longcall");
- rs6000_default_long_calls = integer_onep (n);
+ rs6000_default_long_calls = (n == integer_one_node);
}
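
For reference, the directive parsed here takes a single parenthesized 0 or 1; a hypothetical translation unit exercising it (meaningful only when compiling for these targets) might look like:

/* Hypothetical user code: declarations after #pragma longcall (1)
   receive the longcall attribute by default; (0) switches it off.  */
#pragma longcall (1)
void far_away (void);
#pragma longcall (0)
void nearby (void);

void far_away (void) {}
void nearby (void) {}

int
main (void)
{
  far_away ();
  nearby ();
  return 0;
}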
/* Handle defining many CPP flags based on TARGET_xxx. As a general
@@ -87,8 +94,18 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
builtin_define ("_ARCH_PWR");
if (TARGET_POWERPC)
builtin_define ("_ARCH_PPC");
+ if (TARGET_PPC_GPOPT)
+ builtin_define ("_ARCH_PPCSQ");
+ if (TARGET_PPC_GFXOPT)
+ builtin_define ("_ARCH_PPCGR");
if (TARGET_POWERPC64)
builtin_define ("_ARCH_PPC64");
+ if (TARGET_MFCRF)
+ builtin_define ("_ARCH_PWR4");
+ if (TARGET_POPCNTB)
+ builtin_define ("_ARCH_PWR5");
+ if (TARGET_FPRND)
+ builtin_define ("_ARCH_PWR5X");
if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
builtin_define ("_ARCH_COM");
if (TARGET_ALTIVEC)
@@ -108,6 +125,9 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
/* Used by lwarx/stwcx. errata work-around. */
if (rs6000_cpu == PROCESSOR_PPC405)
builtin_define ("__PPC405__");
+ /* Used by libstdc++. */
+ if (TARGET_NO_LWSYNC)
+ builtin_define ("__NO_LWSYNC__");
/* May be overridden by target configuration. */
RS6000_CPU_CPP_ENDIAN_BUILTINS();
@@ -130,4 +150,2430 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
default:
break;
}
+
+ /* Let the compiled code know if 'f' class registers will not be available. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ builtin_define ("__NO_FPRS__");
+}
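
User code can test these macros in the usual way; a minimal sketch using the names defined above:

#include <stdio.h>

int
main (void)
{
#ifdef _ARCH_PWR5
  puts ("POWER5 or later (popcntb available)");
#elif defined (_ARCH_PPC64)
  puts ("64-bit PowerPC");
#elif defined (_ARCH_PPC)
  puts ("PowerPC");
#else
  puts ("POWER/common architecture");
#endif
  return 0;
}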
+
+
+struct altivec_builtin_types
+{
+ enum rs6000_builtins code;
+ enum rs6000_builtins overloaded_code;
+ signed char ret_type;
+ signed char op1;
+ signed char op2;
+ signed char op3;
+};
+
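
Each entry pairs a generic AltiVec intrinsic and an argument-type signature with the specific builtin it resolves to; for instance, the VEC_ADD rows below send vec_add on two vector signed int operands to vadduwm. A hypothetical user-level view (requires altivec.h and -maltivec):

#include <altivec.h>

/* vec_add is matched against the signature table below: the
   (V4SI, V4SI) entry selects ALTIVEC_BUILTIN_VADDUWM (vadduwm).  */
vector signed int
add4 (vector signed int a, vector signed int b)
{
  return vec_add (a, b);
}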
+const struct altivec_builtin_types altivec_overloaded_builtins[] = {
+ /* Unary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_CEIL, ALTIVEC_BUILTIN_VRFIP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_EXPTE, ALTIVEC_BUILTIN_VEXPTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_FLOOR, ALTIVEC_BUILTIN_VRFIM,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_LOGE, ALTIVEC_BUILTIN_VLOGEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RE, ALTIVEC_BUILTIN_VREFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RSQRTE, ALTIVEC_BUILTIN_VRSQRTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_TRUNC, ALTIVEC_BUILTIN_VRFIZ,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+
+ /* Binary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDFP, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
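+  /* vec_and: bitwise AND.  Every combination of signed, unsigned, bool,
+     and float vector operands maps onto the single vand instruction.  */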
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
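+  /* vec_andc: bitwise AND with complement (vandc), accepting the same
+     operand combinations as vec_and.  */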
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
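+  /* vec_avg: element-wise rounded average, dispatched on element width
+     and signedness (vavgub ... vavgsw).  */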
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSW, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUW, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSH, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUH, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSB, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUB, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
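+  /* vec_cmpb (compare bounds, float only) and vec_cmpeq; the equality
+     compares return a bool vector of matching element width.  */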
+ { ALTIVEC_BUILTIN_VEC_CMPB, ALTIVEC_BUILTIN_VCMPBFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQFP, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
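+  /* vec_cmpge exists only for float (vcmpgefp); vec_cmpgt dispatches on
+     element type and signedness.  */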
+ { ALTIVEC_BUILTIN_VEC_CMPGE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTFP, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
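+  /* There are no less-than instructions: vec_cmple and vec_cmplt reuse
+     vcmpgefp and the vcmpgt* forms, presumably with the operands swapped
+     when the call is expanded.  */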
+ { ALTIVEC_BUILTIN_VEC_CMPLE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
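+  /* vec_ctf, vec_cts, vec_ctu: conversions between fixed-point integer
+     and float vectors; the integer operand is a literal scale exponent.  */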
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
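+  /* vec_ld (lvx): 16-byte-aligned load.  A ~-prefixed type denotes a
+     pointer to that type; the pointee selects the result vector type.  */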
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
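+  /* vec_lde (lvebx/lvehx/lvewx): load a single element into its
+     naturally aligned slot within the vector.  */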
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
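+  /* vec_ldl (lvxl): same overloads as vec_ld, with a least-recently-used
+     cache hint.  */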
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
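+  /* vec_lvsl and vec_lvsr: generate the permute control vector for
+     unaligned accesses; the result is always vector unsigned char.  */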
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
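+  /* vec_max: element-wise maximum (vmaxub ... vmaxsw, vmaxfp).  */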
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXFP, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
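+  /* vec_mergeh and vec_mergel: interleave the high or low halves of the
+     two operands.  */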
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
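+  /* vec_min: element-wise minimum, mirroring the vec_max overloads.  */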
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINFP, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
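+  /* vec_mule and vec_mulo: multiply the even or odd elements, widening
+     the result to the next element size.  */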
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUB, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESB, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUH, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESH, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSH, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUH, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSB, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUB, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
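+  /* vec_nor: bitwise NOR.  */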
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
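+  /* vec_or: bitwise OR, with the same mixed-operand overloads as
+     vec_and.  */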
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
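+  /* vec_pack family: narrowing packs -- modulo (vpku*um), pixel (vpkpx),
+     and signed/unsigned saturating (vec_packs, vec_packsu).  */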
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKPX, ALTIVEC_BUILTIN_VPKPX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWSS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHSS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
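+  /* vec_rl: element-wise rotate left; the rotate counts are given as an
+     unsigned vector.  */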
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
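+  /* vec_sl: element-wise shift left (vslb/vslh/vslw).  */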
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
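+  /* vec_sll (vsl): shift the entire 128-bit register left by a bit
+     count (0-7) taken from the second operand.  */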
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBFP, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SHS, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SBS, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4UBS, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM2S, ALTIVEC_BUILTIN_VSUM2SWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+
+ /* Ternary AltiVec builtins. */
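+ /* Reading these entries: each row pairs an overloaded builtin code
+    with the specific builtin instance it resolves to, followed by the
+    return type and up to three operand types as RS6000_BTI_* indices.
+    A 0 marks an unused operand slot, and a complemented index such as
+    ~RS6000_BTI_unsigned_V16QI denotes a pointer to that type (see
+    rs6000_builtin_type), as in the vec_dst entries below, whose first
+    operand is an address.  RS6000_BTI_NOT_OPAQUE marks a literal
+    operand, such as the shift count of vec_sld, that does not take
+    part in type resolution.  */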
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_MADDS, ALTIVEC_BUILTIN_VMHADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MRADDS, ALTIVEC_BUILTIN_VMHRADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMMBM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUBM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_NMSUB, ALTIVEC_BUILTIN_VNMSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+
+ /* Predicates. */
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+ /* cmpge is implemented in terms of cmpgt for all cases except
+ floating point, which has vcmpgefp. The operand swap and condition
+ flip this requires are handled in altivec_build_resolved_builtin. */
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGEFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+ { 0, 0, 0, 0, 0, 0 }
+};
+
+
+/* Convert a type stored into a struct altivec_builtin_types as ID
+ into a tree. The types are in rs6000_builtin_types: negative values
+ create a pointer type for the type associated with ~ID. Note that
+ this is a logical NOT, rather than a negation; otherwise a pointer
+ type for ID 0 could not be represented. */
+
+static inline tree
+rs6000_builtin_type (int id)
+{
+ tree t;
+ t = rs6000_builtin_types[id < 0 ? ~id : id];
+ return id < 0 ? build_pointer_type (t) : t;
+}
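The one's-complement encoding can be modeled outside the compiler. A minimal standalone sketch (the table and names below are invented; only the id < 0 ? ~id : id selection rule comes from the function above) shows why a logical NOT, unlike arithmetic negation, keeps index 0 usable in both its plain and pointer forms:

#include <stdio.h>

/* Hypothetical stand-ins for entries of rs6000_builtin_types.  */
static const char *demo_types[] = { "float", "int", "short" };

static void demo_lookup (int id)
{
  /* Same selection rule as rs6000_builtin_type above.  */
  const char *t = demo_types[id < 0 ? ~id : id];
  printf (id < 0 ? "%s *\n" : "%s\n", t);
}

int main (void)
{
  demo_lookup (0);    /* prints "float"   */
  demo_lookup (~0);   /* prints "float *" -- negation of 0 could not encode this */
  demo_lookup (~2);   /* prints "short *" */
  return 0;
}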
+
+/* Check whether the type of an argument, T, is compatible with a
+ type ID stored into a struct altivec_builtin_types. Integer
+ types are considered compatible; otherwise, the language hook
+ lang_hooks.types_compatible_p makes the decision. */
+
+static inline bool
+rs6000_builtin_type_compatible (tree t, int id)
+{
+ tree builtin_type;
+ builtin_type = rs6000_builtin_type (id);
+ if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
+ return true;
+ else
+ return lang_hooks.types_compatible_p (t, builtin_type);
}
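The INTEGRAL_TYPE_P short-circuit is what lets the overload table above carry a single RS6000_BTI_INTSI entry for each literal/offset operand: any integer type supplied by the caller is accepted for those slots. A standalone sketch of the rule (all names hypothetical; the exact-match fallback stands in for lang_hooks.types_compatible_p):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum demo_kind { K_INT, K_FLOAT, K_VECTOR };

struct demo_type { enum demo_kind kind; const char *name; };

static bool demo_compatible (struct demo_type a, struct demo_type b)
{
  if (a.kind == K_INT && b.kind == K_INT)   /* INTEGRAL_TYPE_P fast path */
    return true;
  return a.kind == b.kind && strcmp (a.name, b.name) == 0;
}

int main (void)
{
  struct demo_type si = { K_INT, "int" }, sc = { K_INT, "char" };
  struct demo_type v4sf = { K_VECTOR, "V4SF" }, v4si = { K_VECTOR, "V4SI" };
  printf ("%d %d\n", demo_compatible (si, sc),   /* 1: both integral */
          demo_compatible (v4sf, v4si));         /* 0: distinct vector types */
  return 0;
}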
+
+
+/* Build a tree for a function call to an Altivec non-overloaded builtin.
+ The overloaded builtin that matched the types and args is described
+ by DESC. The N arguments are given in ARGS.
+
+ In practice all this does is call fold_convert on ARGS, with a
+ small exception for the vec_{all,any}_{ge,le} predicates. */
+
+static tree
+altivec_build_resolved_builtin (tree *args, int n,
+ const struct altivec_builtin_types *desc)
+{
+ tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
+ tree ret_type = rs6000_builtin_type (desc->ret_type);
+ tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
+ tree arglist = NULL_TREE, arg_type[3];
+
+ int i;
+ for (i = 0; i < n; i++)
+ arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);
+
+ /* The AltiVec overloading implementation is overall gross, but this
+ is particularly disgusting. The vec_{all,any}_{ge,le} builtins
+ are completely different for floating-point vs. integer vector
+ types, because the former has vcmpgefp, but the latter should use
+ vcmpgtXX.
+
+ In practice, the second and third arguments are swapped, and the
+ condition (LT vs. EQ, which is recognizable by bit 1 of the first
+ argument) is reversed. Patch the arguments here before building
+ the resolved CALL_EXPR. */
+ if (desc->code == ALTIVEC_BUILTIN_VCMPGE_P
+ && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P)
+ {
+ tree t;
+ t = args[2], args[2] = args[1], args[1] = t;
+ t = arg_type[2], arg_type[2] = arg_type[1], arg_type[1] = t;
+
+ args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
+ build_int_cst (NULL_TREE, 2));
+ }
+
+ while (--n >= 0)
+ arglist = tree_cons (NULL_TREE,
+ fold_convert (arg_type[n], args[n]),
+ arglist);
+
+ return fold_convert (ret_type,
+ build_function_call_expr (impl_fndecl, arglist));
+}
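Concretely, vec_all_ge (a, b) on integer vectors holds exactly when no element satisfies b > a, so it can reuse vcmpgtXX with the operands swapped and the all/any sense of the CR6 selector inverted. The selector values below follow the __CR6_* constants defined in altivec.h elsewhere in this import; the one-lane model is otherwise an invented illustration of the XOR-with-2 patch-up:

#include <assert.h>

#define CR6_EQ      0   /* all lanes compared false  */
#define CR6_EQ_REV  1   /* some lane compared true   */
#define CR6_LT      2   /* all lanes compared true   */
#define CR6_LT_REV  3   /* some lane compared false  */

/* One-lane stand-in for the vcmpgt*_p builtins.  */
static int demo_vcmpgt_p (int cond, int a, int b)
{
  int gt = a > b;
  return (cond == CR6_LT || cond == CR6_EQ_REV) ? gt : !gt;
}

int main (void)
{
  int a = 5, b = 7;
  /* vec_all_gt (a, b) -> vcmpgt_p (CR6_LT, a, b).  */
  assert (demo_vcmpgt_p (CR6_LT, a, b) == (a > b));
  /* vec_all_ge (a, b) -> vcmpgt_p (CR6_LT ^ 2, b, a): operands swapped,
     condition flipped by XOR-ing bit 1, exactly as patched above.  */
  assert (demo_vcmpgt_p (CR6_LT ^ 2, b, a) == (a >= b));
  return 0;
}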
+
+/* Implementation of the resolve_overloaded_builtin target hook, to
+ support Altivec's overloaded builtins. */
+
+tree
+altivec_resolve_overloaded_builtin (tree fndecl, tree arglist)
+{
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ tree types[3], args[3];
+ const struct altivec_builtin_types *desc;
+ int n;
+
+ if (fcode < ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ || fcode > ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ return NULL_TREE;
+
+ for (n = 0;
+ !VOID_TYPE_P (TREE_VALUE (fnargs)) && arglist;
+ fnargs = TREE_CHAIN (fnargs), arglist = TREE_CHAIN (arglist), n++)
+ {
+ tree decl_type = TREE_VALUE (fnargs);
+ tree arg = TREE_VALUE (arglist);
+ tree type;
+
+ if (arg == error_mark_node)
+ return error_mark_node;
+
+ if (n >= 3)
+ abort ();
+
+ arg = default_conversion (arg);
+
+ /* The C++ front-end converts float * to const void * using
+ NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */
+ type = TREE_TYPE (arg);
+ if (POINTER_TYPE_P (type)
+ && TREE_CODE (arg) == NOP_EXPR
+ && lang_hooks.types_compatible_p (TREE_TYPE (arg),
+ const_ptr_type_node)
+ && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
+ ptr_type_node))
+ {
+ arg = TREE_OPERAND (arg, 0);
+ type = TREE_TYPE (arg);
+ }
+
+ /* Remove the const from the pointers to simplify the overload
+ matching further down. */
+ if (POINTER_TYPE_P (decl_type)
+ && POINTER_TYPE_P (type)
+ && TYPE_QUALS (TREE_TYPE (type)) != 0)
+ {
+ if (TYPE_READONLY (TREE_TYPE (type))
+ && !TYPE_READONLY (TREE_TYPE (decl_type)))
+ warning (0, "passing arg %d of %qE discards qualifiers from"
+ "pointer target type", n + 1, fndecl);
+ type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
+ 0));
+ arg = fold_convert (type, arg);
+ }
+
+ args[n] = arg;
+ types[n] = type;
+ }
+
+ /* If the number of arguments did not match the prototype, return NULL
+ and the generic code will issue the appropriate error message. */
+ if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || arglist)
+ return NULL;
+
+ if (n == 0)
+ abort ();
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
+ {
+ if (TREE_CODE (types[0]) != VECTOR_TYPE)
+ goto bad;
+
+ return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
+ }
+
+ for (desc = altivec_overloaded_builtins;
+ desc->code && desc->code != fcode; desc++)
+ continue;
+
+ /* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
+ the opX fields. */
+ for (; desc->code == fcode; desc++)
+ if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[0], desc->op1))
+ && (desc->op2 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[1], desc->op2))
+ && (desc->op3 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[2], desc->op3)))
+ return altivec_build_resolved_builtin (args, n, desc);
+
+ bad:
+ error ("invalid parameter combination for AltiVec intrinsic");
+ return error_mark_node;
+}
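From the user's side, the table walk above is what lets one vec_* spelling fan out to type-specific builtins. A sketch of the effect for two rows of the store tables (assumes an AltiVec-enabled compiler with -maltivec; shown for illustration, not exercised here):

#include <altivec.h>

void store_one_byte (vector unsigned char v, int off, unsigned char *p)
{
  /* Matches the { VEC_STE, STVEBX, void, unsigned_V16QI, INTSI,
     ~UINTQI } row and resolves to __builtin_altivec_stvebx.  */
  vec_ste (v, off, p);
}

void store_one_word (vector float v, int off, float *p)
{
  /* Matches a VEC_STE/STVEWX row; ~RS6000_BTI_float accepts the
     pointer-to-float third argument.  */
  vec_ste (v, off, p);
}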
+
diff --git a/contrib/gcc/config/rs6000/rs6000-modes.def b/contrib/gcc/config/rs6000/rs6000-modes.def
index 6f17f1a..c0599b3 100644
--- a/contrib/gcc/config/rs6000/rs6000-modes.def
+++ b/contrib/gcc/config/rs6000/rs6000-modes.def
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for IBM RS/6000.
- Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* 128-bit floating point. ABI_V4 uses IEEE quad, AIX/Darwin
adjust this in rs6000_override_options. */
@@ -38,3 +38,10 @@ PARTIAL_INT_MODE (SI);
CC_MODE (CCUNS);
CC_MODE (CCFP);
CC_MODE (CCEQ);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODE (INT, DI, 1);
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
diff --git a/contrib/gcc/config/rs6000/rs6000-protos.h b/contrib/gcc/config/rs6000/rs6000-protos.h
index 3303ecd..0cb5a50 100644
--- a/contrib/gcc/config/rs6000/rs6000-protos.h
+++ b/contrib/gcc/config/rs6000/rs6000-protos.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for IBM RS/6000.
- Copyright (C) 2000, 2001, 2002, 2003, 2004
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#ifndef GCC_RS6000_PROTOS_H
#define GCC_RS6000_PROTOS_H
@@ -32,83 +32,42 @@ extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, int, int, int);
extern void rs6000_va_start (tree, rtx);
#endif /* TREE_CODE */
-extern struct rtx_def *rs6000_got_register (rtx);
-extern struct rtx_def *find_addr_reg (rtx);
-extern int any_operand (rtx, enum machine_mode);
-extern int short_cint_operand (rtx, enum machine_mode);
-extern int u_short_cint_operand (rtx, enum machine_mode);
-extern int non_short_cint_operand (rtx, enum machine_mode);
-extern int exact_log2_cint_operand (rtx, enum machine_mode);
-extern int gpc_reg_operand (rtx, enum machine_mode);
-extern int cc_reg_operand (rtx, enum machine_mode);
-extern int cc_reg_not_cr0_operand (rtx, enum machine_mode);
-extern int reg_or_short_operand (rtx, enum machine_mode);
-extern int reg_or_neg_short_operand (rtx, enum machine_mode);
-extern int reg_or_aligned_short_operand (rtx, enum machine_mode);
-extern int reg_or_u_short_operand (rtx, enum machine_mode);
-extern int reg_or_cint_operand (rtx, enum machine_mode);
-extern int reg_or_arith_cint_operand (rtx, enum machine_mode);
-extern int reg_or_add_cint64_operand (rtx, enum machine_mode);
-extern int reg_or_sub_cint64_operand (rtx, enum machine_mode);
-extern int reg_or_logical_cint_operand (rtx, enum machine_mode);
-extern int got_operand (rtx, enum machine_mode);
-extern int word_offset_memref_operand (rtx, enum machine_mode);
-extern int got_no_const_operand (rtx, enum machine_mode);
+extern bool easy_altivec_constant (rtx, enum machine_mode);
+extern bool macho_lo_sum_memory_operand (rtx, enum machine_mode);
extern int num_insns_constant (rtx, enum machine_mode);
-extern int easy_fp_constant (rtx, enum machine_mode);
-extern int easy_vector_constant (rtx, enum machine_mode);
-extern rtx gen_easy_vector_constant_add_self (rtx);
-extern const char *output_vec_const_move (rtx *);
-extern int zero_fp_constant (rtx, enum machine_mode);
-extern int zero_constant (rtx, enum machine_mode);
-extern int volatile_mem_operand (rtx, enum machine_mode);
-extern int offsettable_mem_operand (rtx, enum machine_mode);
-extern int mem_or_easy_const_operand (rtx, enum machine_mode);
-extern int add_operand (rtx, enum machine_mode);
-extern int non_add_cint_operand (rtx, enum machine_mode);
-extern int non_logical_cint_operand (rtx, enum machine_mode);
-extern int logical_operand (rtx, enum machine_mode);
-extern int mask_operand (rtx, enum machine_mode);
-extern int mask_operand_wrap (rtx, enum machine_mode);
-extern int mask64_operand (rtx, enum machine_mode);
-extern int mask64_2_operand (rtx, enum machine_mode);
-extern void build_mask64_2_operands (rtx, rtx *);
-extern int and64_operand (rtx, enum machine_mode);
-extern int and64_2_operand (rtx, enum machine_mode);
-extern int and_operand (rtx, enum machine_mode);
-extern int count_register_operand (rtx, enum machine_mode);
-extern int xer_operand (rtx, enum machine_mode);
-extern int reg_or_mem_operand (rtx, enum machine_mode);
-extern int lwa_operand (rtx, enum machine_mode);
-extern int call_operand (rtx, enum machine_mode);
-extern int current_file_function_operand (rtx, enum machine_mode);
-extern int input_operand (rtx, enum machine_mode);
+extern int num_insns_constant_wide (HOST_WIDE_INT);
extern int small_data_operand (rtx, enum machine_mode);
-extern int s8bit_cint_operand (rtx, enum machine_mode);
+extern bool toc_relative_expr_p (rtx);
+extern bool invalid_e500_subreg (rtx, enum machine_mode);
+extern void validate_condition_mode (enum rtx_code, enum machine_mode);
extern bool legitimate_constant_pool_address_p (rtx);
+extern bool legitimate_indirect_address_p (rtx, int);
+
+extern rtx rs6000_got_register (rtx);
+extern rtx find_addr_reg (rtx);
+extern rtx gen_easy_altivec_constant (rtx);
+extern const char *output_vec_const_move (rtx *);
+extern void rs6000_expand_vector_init (rtx, rtx);
+extern void rs6000_expand_vector_set (rtx, rtx, int);
+extern void rs6000_expand_vector_extract (rtx, rtx, int);
+extern void build_mask64_2_operands (rtx, rtx *);
+extern int expand_block_clear (rtx[]);
extern int expand_block_move (rtx[]);
-extern int load_multiple_operation (rtx, enum machine_mode);
extern const char * rs6000_output_load_multiple (rtx[]);
-extern int store_multiple_operation (rtx, enum machine_mode);
-extern int branch_comparison_operator (rtx, enum machine_mode);
-extern int branch_positive_comparison_operator (rtx, enum machine_mode);
-extern int scc_comparison_operator (rtx, enum machine_mode);
-extern int trap_comparison_operator (rtx, enum machine_mode);
-extern int boolean_operator (rtx, enum machine_mode);
-extern int boolean_or_operator (rtx, enum machine_mode);
-extern int min_max_operator (rtx, enum machine_mode);
extern int includes_lshift_p (rtx, rtx);
extern int includes_rshift_p (rtx, rtx);
extern int includes_rldic_lshift_p (rtx, rtx);
extern int includes_rldicr_lshift_p (rtx, rtx);
+extern int insvdi_rshift_rlwimi_p (rtx, rtx, rtx);
extern int registers_ok_for_quad_peep (rtx, rtx);
-extern int addrs_ok_for_quad_peep (rtx, rtx);
+extern int mems_ok_for_quad_peep (rtx, rtx);
extern bool gpr_or_gpr_p (rtx, rtx);
-extern enum reg_class secondary_reload_class (enum reg_class,
- enum machine_mode, rtx);
+extern enum reg_class rs6000_secondary_reload_class (enum reg_class,
+ enum machine_mode, rtx);
extern int ccr_bit (rtx, int);
extern int extract_MB (rtx);
extern int extract_ME (rtx);
+extern void rs6000_output_function_entry (FILE *, const char *);
extern void print_operand (FILE *, rtx, int);
extern void print_operand_address (FILE *, rtx);
extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
@@ -116,52 +75,56 @@ extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
extern void rs6000_emit_sCOND (enum rtx_code, rtx);
extern void rs6000_emit_cbranch (enum rtx_code, rtx);
extern char * output_cbranch (rtx, const char *, int, rtx);
-extern char * output_e500_flip_eq_bit (rtx, rtx);
+extern char * output_e500_flip_gt_bit (rtx, rtx);
extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
+extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx);
+extern void rs6000_emit_sync (enum rtx_code, enum machine_mode,
+ rtx, rtx, rtx, rtx, bool);
+extern void rs6000_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swap (rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_expand_compare_and_swapqhi (rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swapqhi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_lock_test_and_set (rtx, rtx, rtx, rtx);
+extern void rs6000_emit_swdivsf (rtx, rtx, rtx);
+extern void rs6000_emit_swdivdf (rtx, rtx, rtx);
extern void output_toc (FILE *, rtx, int, enum machine_mode);
extern void rs6000_initialize_trampoline (rtx, rtx, rtx);
-extern struct rtx_def *rs6000_longcall_ref (rtx);
+extern rtx rs6000_longcall_ref (rtx);
extern void rs6000_fatal_bad_address (rtx);
-extern int stmw_operation (rtx, enum machine_mode);
-extern int mfcr_operation (rtx, enum machine_mode);
-extern int mtcrf_operation (rtx, enum machine_mode);
-extern int lmw_operation (rtx, enum machine_mode);
-extern struct rtx_def *create_TOC_reference (rtx);
+extern rtx create_TOC_reference (rtx);
extern void rs6000_split_multireg_move (rtx, rtx);
extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
extern rtx rs6000_legitimize_reload_address (rtx, enum machine_mode,
- int, int, int, int *);
+ int, int, int, int *);
extern int rs6000_legitimate_address (enum machine_mode, rtx, int);
+extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int);
extern bool rs6000_mode_dependent_address (rtx);
+extern bool rs6000_offsettable_memref_p (rtx);
extern rtx rs6000_return_addr (int, rtx);
extern void rs6000_output_symbol_ref (FILE*, rtx);
extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int);
-extern rtx rs6000_machopic_legitimize_pic_address (rtx orig,
- enum machine_mode mode, rtx reg);
-
+extern rtx rs6000_machopic_legitimize_pic_address (rtx, enum machine_mode,
+ rtx);
#endif /* RTX_CODE */
#ifdef TREE_CODE
-extern unsigned int rs6000_special_round_type_align (tree, int, int);
+extern unsigned int rs6000_special_round_type_align (tree, unsigned int,
+ unsigned int);
extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
- tree, int);
+ tree, int, int);
extern int function_arg_boundary (enum machine_mode, tree);
-extern struct rtx_def *function_arg (CUMULATIVE_ARGS *,
- enum machine_mode, tree, int);
-extern int function_arg_partial_nregs (CUMULATIVE_ARGS *,
- enum machine_mode, tree, int);
-extern int function_arg_pass_by_reference (CUMULATIVE_ARGS *,
- enum machine_mode,
- tree, int);
+extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
+extern tree altivec_resolve_overloaded_builtin (tree, tree);
extern rtx rs6000_function_value (tree, tree);
extern rtx rs6000_libcall_value (enum machine_mode);
-extern struct rtx_def *rs6000_va_arg (tree, tree);
+extern rtx rs6000_va_arg (tree, tree);
extern int function_ok_for_sibcall (tree);
extern void rs6000_elf_declare_function_name (FILE *, const char *, tree);
+extern bool rs6000_elf_in_small_data_p (tree);
#ifdef ARGS_SIZE_RTX
/* expr.h defines ARGS_SIZE_RTX and `enum direction' */
extern enum direction function_arg_padding (enum machine_mode, tree);
@@ -179,13 +142,6 @@ extern void rs6000_gen_section_name (char **, const char *, const char *);
extern void output_function_profiler (FILE *, int);
extern void output_profile_hook (int);
extern int rs6000_trampoline_size (void);
-extern void toc_section (void);
-extern void sdata_section (void);
-extern void sdata2_section (void);
-extern void sbss_section (void);
-extern void private_data_section (void);
-extern void read_only_data_section (void);
-extern void read_only_private_data_section (void);
extern int get_TOC_alias_set (void);
extern void rs6000_emit_prologue (void);
extern void rs6000_emit_load_toc_table (int);
@@ -194,13 +150,12 @@ extern unsigned int rs6000_dbx_register_number (unsigned int);
extern void rs6000_emit_epilogue (int);
extern void rs6000_emit_eh_reg_restore (rtx, rtx);
extern const char * output_isel (rtx *);
-extern int vrsave_operation (rtx, enum machine_mode);
extern int rs6000_register_move_cost (enum machine_mode,
- enum reg_class, enum reg_class);
+ enum reg_class, enum reg_class);
extern int rs6000_memory_move_cost (enum machine_mode, enum reg_class, int);
extern bool rs6000_tls_referenced_p (rtx);
-extern int rs6000_tls_symbol_ref (rtx, enum machine_mode);
-extern void rs6000_output_dwarf_dtprel (FILE*, int, rtx);
+extern int rs6000_hard_regno_nregs (int, enum machine_mode);
+extern void rs6000_conditional_register_usage (void);
/* Declare functions in rs6000-c.c */
@@ -211,4 +166,5 @@ extern void rs6000_cpu_cpp_builtins (struct cpp_reader *);
char *output_call (rtx, rtx *, int, int);
#endif
+extern bool rs6000_hard_regno_mode_ok_p[][FIRST_PSEUDO_REGISTER];
#endif /* rs6000-protos.h */
diff --git a/contrib/gcc/config/rs6000/rs6000.c b/contrib/gcc/config/rs6000/rs6000.c
index ea35843..5e9d0fe 100644
--- a/contrib/gcc/config/rs6000/rs6000.c
+++ b/contrib/gcc/config/rs6000/rs6000.c
@@ -1,6 +1,7 @@
/* Subroutines used for code generation on IBM RS/6000.
- Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -17,8 +18,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#include "config.h"
#include "system.h"
@@ -52,18 +53,21 @@
#include "reload.h"
#include "cfglayout.h"
#include "sched-int.h"
+#include "tree-gimple.h"
+#include "intl.h"
+#include "params.h"
+#include "tm-constrs.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
+#if TARGET_MACHO
+#include "gstab.h" /* for N_SLINE */
+#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
-#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)
-#define EASY_VECTOR_15_ADD_SELF(n) ((n) >= 0x10 && (n) <= 0x1e \
- && !((n) & 1))
-
#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
@@ -75,9 +79,10 @@ typedef struct rs6000_stack {
int lr_save_p; /* true if the link reg needs to be saved */
int cr_save_p; /* true if the CR reg needs to be saved */
unsigned int vrsave_mask; /* mask of vec registers to save */
- int toc_save_p; /* true if the TOC needs to be saved */
int push_p; /* true if we need to allocate stack space */
int calls_p; /* true if the function makes any calls */
+ int world_save_p; /* true if we're saving *everything*:
+ r13-r31, cr, f14-f31, vrsave, v20-v31 */
enum rs6000_abi abi; /* which ABI to use */
int gp_save_offset; /* offset to save GP regs from initial SP */
int fp_save_offset; /* offset to save FP regs from initial SP */
@@ -86,11 +91,9 @@ typedef struct rs6000_stack {
int cr_save_offset; /* offset to save CR from initial SP */
int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
- int toc_save_offset; /* offset to save the TOC pointer */
int varargs_save_offset; /* offset to save the varargs registers */
int ehrd_offset; /* offset to EH return data */
int reg_size; /* register size (4 or 8) */
- int varargs_size; /* size to hold V.4 args passed in regs */
HOST_WIDE_INT vars_size; /* variable save area size */
int parm_size; /* outgoing parameter size */
int save_size; /* save area size */
@@ -99,17 +102,32 @@ typedef struct rs6000_stack {
int fp_size; /* size of saved FP registers */
int altivec_size; /* size of saved AltiVec registers */
int cr_size; /* size to hold CR if not in save_size */
- int lr_size; /* size to hold LR if not in save_size */
int vrsave_size; /* size to hold VRSAVE if not in save_size */
int altivec_padding_size; /* size of altivec alignment padding if
not in save_size */
int spe_gp_size; /* size of 64-bit GPR save size for SPE */
int spe_padding_size;
- int toc_size; /* size to hold TOC if not in save_size */
HOST_WIDE_INT total_size; /* total bytes allocated for stack */
int spe_64bit_regs_used;
} rs6000_stack_t;
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct machine_function GTY(())
+{
+ /* Flags if __builtin_return_address (n) with n >= 1 was used. */
+ int ra_needs_full_frame;
+ /* Some local-dynamic symbol. */
+ const char *some_ld_name;
+ /* Whether the instruction chain has been scanned already. */
+ int insn_chain_scanned_p;
+ /* Flags if __builtin_return_address (0) was used. */
+ int ra_need_lr;
+ /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
+ varargs save area. */
+ HOST_WIDE_INT varargs_save_offset;
+} machine_function;
+
/* Target cpu type */
enum processor_type rs6000_cpu;
@@ -127,11 +145,6 @@ static GTY(()) bool rs6000_always_hint;
/* Schedule instructions for group formation. */
static GTY(()) bool rs6000_sched_groups;
-/* Support adjust_priority scheduler hook
- and -mprioritize-restricted-insns= option. */
-const char *rs6000_sched_restricted_insns_priority_str;
-int rs6000_sched_restricted_insns_priority;
-
/* Support for -msched-costly-dep option. */
const char *rs6000_sched_costly_dep_str;
enum rs6000_dependence_cost rs6000_sched_costly_dep;
@@ -140,39 +153,26 @@ enum rs6000_dependence_cost rs6000_sched_costly_dep;
const char *rs6000_sched_insert_nops_str;
enum rs6000_nop_insertion rs6000_sched_insert_nops;
-/* Size of long double */
-const char *rs6000_long_double_size_string;
-int rs6000_long_double_type_size;
+/* Support targetm.vectorize.builtin_mask_for_load. */
+static GTY(()) tree altivec_builtin_mask_for_load;
-/* Whether -mabi=altivec has appeared */
-int rs6000_altivec_abi;
+/* Size of long double. */
+int rs6000_long_double_type_size;
-/* Whether VRSAVE instructions should be generated. */
-int rs6000_altivec_vrsave;
+/* IEEE quad extended precision long double. */
+int rs6000_ieeequad;
-/* String from -mvrsave= option. */
-const char *rs6000_altivec_vrsave_string;
+/* Whether -mabi=altivec has appeared. */
+int rs6000_altivec_abi;
/* Nonzero if we want SPE ABI extensions. */
int rs6000_spe_abi;
-/* Whether isel instructions should be generated. */
-int rs6000_isel;
-
-/* Whether SPE simd instructions should be generated. */
-int rs6000_spe;
-
/* Nonzero if floating point operations are done in the GPRs. */
int rs6000_float_gprs = 0;
-/* String from -mfloat-gprs=. */
-const char *rs6000_float_gprs_string;
-
-/* String from -misel=. */
-const char *rs6000_isel_string;
-
-/* String from -mspe=. */
-const char *rs6000_spe_string;
+/* Nonzero if we want Darwin's struct-by-value-in-regs ABI. */
+int rs6000_darwin64_abi;
/* Set to nonzero once AIX common-mode calls have been defined. */
static GTY(()) int common_mode_defined;
@@ -207,32 +207,21 @@ const char *rs6000_tls_size_string;
/* ABI enumeration available for subtarget to use. */
enum rs6000_abi rs6000_current_abi;
-/* ABI string from -mabi= option. */
-const char *rs6000_abi_string;
+/* Whether to use variant of AIX ABI for PowerPC64 Linux. */
+int dot_symbols;
/* Debug flags */
const char *rs6000_debug_name;
int rs6000_debug_stack; /* debug stack applications */
int rs6000_debug_arg; /* debug argument handling */
-/* Opaque types. */
-static GTY(()) tree opaque_V2SI_type_node;
-static GTY(()) tree opaque_V2SF_type_node;
-static GTY(()) tree opaque_p_V2SI_type_node;
-
-/* AltiVec requires a few more basic types in addition to the vector
- types already defined in tree.c. */
-static GTY(()) tree bool_char_type_node; /* __bool char */
-static GTY(()) tree bool_short_type_node; /* __bool short */
-static GTY(()) tree bool_int_type_node; /* __bool int */
-static GTY(()) tree pixel_type_node; /* __pixel */
-static GTY(()) tree bool_V16QI_type_node; /* __vector __bool char */
-static GTY(()) tree bool_V8HI_type_node; /* __vector __bool short */
-static GTY(()) tree bool_V4SI_type_node; /* __vector __bool int */
-static GTY(()) tree pixel_V8HI_type_node; /* __vector __pixel */
-
-int rs6000_warn_altivec_long = 1; /* On by default. */
-const char *rs6000_warn_altivec_long_switch;
+/* Value is TRUE if register/mode pair is acceptable. */
+bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Built in types. */
+
+tree rs6000_builtin_types[RS6000_BTI_MAX];
+tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
const char *rs6000_traceback_name;
static enum {
@@ -246,22 +235,28 @@ static enum {
int toc_initialized;
char toc_label_name[10];
-/* Alias set for saves and restores from the rs6000 stack. */
-static GTY(()) int rs6000_sr_alias_set;
-
-/* Call distance, overridden by -mlongcall and #pragma longcall(1).
- The only place that looks at this is rs6000_set_default_type_attributes;
- everywhere else should rely on the presence or absence of a longcall
- attribute on the function declaration. Exception: init_cumulative_args
- looks at it too, for libcalls. */
-int rs6000_default_long_calls;
-const char *rs6000_longcall_switch;
+static GTY(()) section *read_only_data_section;
+static GTY(()) section *private_data_section;
+static GTY(()) section *read_only_private_data_section;
+static GTY(()) section *sdata2_section;
+static GTY(()) section *toc_section;
/* Control alignment for fields within structures. */
/* String from -malign-XXXXX. */
-const char *rs6000_alignment_string;
int rs6000_alignment_flags;
+/* True for any options that were explicitly set. */
+struct {
+ bool aix_struct_ret; /* True if -maix-struct-ret was used. */
+ bool alignment; /* True if -malign- was used. */
+ bool abi; /* True if -mabi=spe/nospe was used. */
+ bool spe; /* True if -mspe= was used. */
+ bool float_gprs; /* True if -mfloat-gprs= was used. */
+ bool isel; /* True if -misel was used. */
+ bool long_double; /* True if -mlong-double- was used. */
+ bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
+} rs6000_explicit_options;
+
struct builtin_description
{
/* mask is not const because we're going to alter it below. This
@@ -272,10 +267,314 @@ struct builtin_description
const char *const name;
const enum rs6000_builtins code;
};
+
+/* Target cpu costs. */
+
+struct processor_costs {
+ const int mulsi; /* cost of SImode multiplication. */
+ const int mulsi_const; /* cost of SImode multiplication by constant. */
+ const int mulsi_const9; /* cost of SImode mult by short constant. */
+ const int muldi; /* cost of DImode multiplication. */
+ const int divsi; /* cost of SImode division. */
+ const int divdi; /* cost of DImode division. */
+ const int fp; /* cost of simple SFmode and DFmode insns. */
+ const int dmul; /* cost of DFmode multiplication (and fmadd). */
+ const int sdiv; /* cost of SFmode division (fdivs). */
+ const int ddiv; /* cost of DFmode division (fdiv). */
+};
+
+const struct processor_costs *rs6000_cost;
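The tables that follow are expressed in GCC's instruction-cost units: COSTS_N_INSNS (N) scales a latency of N instructions relative to a simple add, and rs6000_cost is later pointed at whichever table matches the CPU being tuned for, so the rtx cost hook can read fields out of it. A standalone sketch of that pattern (COSTS_N_INSNS is assumed to expand to (N) * 4, as in rtl.h of this era; the rest is hypothetical):

#include <stdio.h>

#define COSTS_N_INSNS(N) ((N) * 4)   /* assumed definition, cf. rtl.h */

struct demo_costs { int mulsi; int divsi; };

static const struct demo_costs demo_ppc750 = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (17),   /* divsi */
};

static const struct demo_costs *demo_cost;   /* cf. rs6000_cost */

int main (void)
{
  demo_cost = &demo_ppc750;   /* would be selected per -mtune */
  printf ("mulsi=%d divsi=%d\n", demo_cost->mulsi, demo_cost->divsi);
  return 0;
}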
+
+/* Processor costs (relative to an add) */
+
+/* Instruction size costs on 32bit processors. */
+static const
+struct processor_costs size32_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+};
+
+/* Instruction size costs on 64bit processors. */
+static const
+struct processor_costs size64_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+};
+
+/* Instruction costs on RIOS1 processors. */
+static const
+struct processor_costs rios1_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (19), /* ddiv */
+};
+
+/* Instruction costs on RIOS2 processors. */
+static const
+struct processor_costs rios2_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (13), /* divsi */
+ COSTS_N_INSNS (13), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
+/* Instruction costs on RS64A processors. */
+static const
+struct processor_costs rs64a_cost = {
+ COSTS_N_INSNS (20), /* mulsi */
+ COSTS_N_INSNS (12), /* mulsi_const */
+ COSTS_N_INSNS (8), /* mulsi_const9 */
+ COSTS_N_INSNS (34), /* muldi */
+ COSTS_N_INSNS (65), /* divsi */
+ COSTS_N_INSNS (67), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (31), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on MPCCORE processors. */
+static const
+struct processor_costs mpccore_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (6), /* divsi */
+ COSTS_N_INSNS (6), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (10), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
+/* Instruction costs on PPC403 processors. */
+static const
+struct processor_costs ppc403_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (33), /* divsi */
+ COSTS_N_INSNS (33), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+};
+
+/* Instruction costs on PPC405 processors. */
+static const
+struct processor_costs ppc405_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (35), /* divsi */
+ COSTS_N_INSNS (35), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+};
+
+/* Instruction costs on PPC440 processors. */
+static const
+struct processor_costs ppc440_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (3), /* muldi */
+ COSTS_N_INSNS (34), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+};
+
+/* Instruction costs on PPC601 processors. */
+static const
+struct processor_costs ppc601_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (5), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (36), /* divsi */
+ COSTS_N_INSNS (36), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on PPC603 processors. */
+static const
+struct processor_costs ppc603_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (37), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+};
+
+/* Instruction costs on PPC604 processors. */
+static const
+struct processor_costs ppc604_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC604e processors. */
+static const
+struct processor_costs ppc604e_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC620 processors. */
+static const
+struct processor_costs ppc620_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC630 processors. */
+static const
+struct processor_costs ppc630_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (21), /* ddiv */
+};
+
+/* Instruction costs on PPC750 and PPC7400 processors. */
+static const
+struct processor_costs ppc750_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (17), /* divsi */
+ COSTS_N_INSNS (17), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on PPC7450 processors. */
+static const
+struct processor_costs ppc7450_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (23), /* divsi */
+ COSTS_N_INSNS (23), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (21), /* sdiv */
+ COSTS_N_INSNS (35), /* ddiv */
+};
+
+/* Instruction costs on PPC8540 processors. */
+static const
+struct processor_costs ppc8540_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (29), /* sdiv */
+ COSTS_N_INSNS (29), /* ddiv */
+};
+
+/* Instruction costs on POWER4 and POWER5 processors. */
+static const
+struct processor_costs power4_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
static bool rs6000_function_ok_for_sibcall (tree, tree);
-static int num_insns_constant_wide (HOST_WIDE_INT);
-static void validate_condition_mode (enum rtx_code, enum machine_mode);
+static const char *rs6000_invalid_within_doloop (rtx);
static rtx rs6000_generate_compare (enum rtx_code);
static void rs6000_maybe_dead (rtx);
static void rs6000_emit_stack_tie (void);
@@ -291,21 +590,21 @@ static unsigned toc_hash_function (const void *);
static int toc_hash_eq (const void *, const void *);
static int constant_pool_expr_1 (rtx, int *, int *);
static bool constant_pool_expr_p (rtx);
-static bool toc_relative_expr_p (rtx);
static bool legitimate_small_data_p (enum machine_mode, rtx);
-static bool legitimate_offset_address_p (enum machine_mode, rtx, int);
static bool legitimate_indexed_address_p (rtx, int);
-static bool legitimate_indirect_address_p (rtx, int);
-static bool macho_lo_sum_memory_operand (rtx x, enum machine_mode mode);
static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
static bool rs6000_assemble_integer (rtx, unsigned int, int);
+static bool no_global_regs_above (int);
#ifdef HAVE_GAS_HIDDEN
static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static bool rs6000_ms_bitfield_layout_p (tree);
+static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
+static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
static const char *rs6000_mangle_fundamental_type (tree);
extern const struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
@@ -317,34 +616,33 @@ static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static bool rs6000_return_in_memory (tree, tree);
static void rs6000_file_start (void);
#if TARGET_ELF
-static unsigned int rs6000_elf_section_type_flags (tree, const char *, int);
+static int rs6000_elf_reloc_rw_mask (void);
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
-static void rs6000_elf_select_section (tree, int, unsigned HOST_WIDE_INT);
-static void rs6000_elf_unique_section (tree, int);
-static void rs6000_elf_select_rtx_section (enum machine_mode, rtx,
- unsigned HOST_WIDE_INT);
+static void rs6000_elf_asm_init_sections (void);
+static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
static void rs6000_elf_encode_section_info (tree, rtx, int)
ATTRIBUTE_UNUSED;
-static bool rs6000_elf_in_small_data_p (tree);
#endif
+static bool rs6000_use_blocks_for_constant_p (enum machine_mode, rtx);
#if TARGET_XCOFF
+static void rs6000_xcoff_asm_output_anchor (rtx);
static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
-static void rs6000_xcoff_asm_named_section (const char *, unsigned int);
-static void rs6000_xcoff_select_section (tree, int, unsigned HOST_WIDE_INT);
-static void rs6000_xcoff_unique_section (tree, int);
-static void rs6000_xcoff_select_rtx_section (enum machine_mode, rtx,
+static void rs6000_xcoff_asm_init_sections (void);
+static int rs6000_xcoff_reloc_rw_mask (void);
+static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
+static section *rs6000_xcoff_select_section (tree, int,
unsigned HOST_WIDE_INT);
+static void rs6000_xcoff_unique_section (tree, int);
+static section *rs6000_xcoff_select_rtx_section
+ (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
static const char * rs6000_xcoff_strip_name_encoding (const char *);
static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
static void rs6000_xcoff_file_start (void);
static void rs6000_xcoff_file_end (void);
#endif
-#if TARGET_MACHO
-static bool rs6000_binds_local_p (tree);
-#endif
-static int rs6000_use_dfa_pipeline_interface (void);
static int rs6000_variable_issue (FILE *, int, rtx, int);
static bool rs6000_rtx_costs (rtx, int, int, int *);
static int rs6000_adjust_cost (rtx, rtx, rtx, int);
@@ -363,7 +661,9 @@ static int redefine_groups (FILE *, int, rtx, rtx);
static int pad_groups (FILE *, int, rtx, rtx);
static void rs6000_sched_finish (FILE *, int);
static int rs6000_use_sched_lookahead (void);
+static tree rs6000_builtin_mask_for_load (void);
+static void def_builtin (int, const char *, tree, int);
static void rs6000_init_builtins (void);
static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
@@ -376,6 +676,7 @@ static void rs6000_init_libfuncs (void);
static void enable_mask_for_builtins (struct builtin_description *, int,
enum rs6000_builtins,
enum rs6000_builtins);
+static tree build_opaque_vector_type (tree, int);
static void spe_init_builtins (void);
static rtx spe_expand_builtin (tree, rtx, bool *);
static rtx spe_expand_stv_builtin (enum insn_code, tree);
@@ -390,45 +691,78 @@ static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
-static rtx altivec_expand_predicate_builtin (enum insn_code,
- const char *, tree, rtx);
+static rtx altivec_expand_predicate_builtin (enum insn_code,
+ const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
-static void rs6000_parse_abi_options (void);
-static void rs6000_parse_alignment_option (void);
+static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
+static rtx altivec_expand_vec_set_builtin (tree);
+static rtx altivec_expand_vec_ext_builtin (tree, rtx);
+static int get_element_number (tree, tree);
+static bool rs6000_handle_option (size_t, const char *, int);
static void rs6000_parse_tls_size_option (void);
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
+static void compute_save_world_info (rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
-static int easy_vector_same (rtx, enum machine_mode);
-static int easy_vector_splat_const (int, enum machine_mode);
-static bool is_ev64_opaque_type (tree);
+static bool rs6000_is_opaque_type (tree);
static rtx rs6000_dwarf_register_span (rtx);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
+static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static rtx rs6000_tls_get_addr (void);
static rtx rs6000_got_sym (void);
-static inline int rs6000_tls_symbol_ref_1 (rtx *, void *);
+static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static const char *rs6000_get_some_local_dynamic_name (void);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
enum machine_mode, tree);
+static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT,
+ rtx[], int *);
+static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT,
+ rtx[], int *);
+static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, tree, int, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
enum machine_mode, tree,
int *, int);
+static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static const char *invalid_arg_for_unprototyped_fn (tree, tree, tree);
#if TARGET_MACHO
static void macho_branch_islands (void);
-static void add_compiler_branch_island (tree, tree, int);
static int no_previous_def (tree function_name);
static tree get_prev_label (tree function_name);
+static void rs6000_darwin_file_start (void);
#endif
static tree rs6000_build_builtin_va_list (void);
+static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static bool rs6000_scalar_mode_supported_p (enum machine_mode);
+static bool rs6000_vector_mode_supported_p (enum machine_mode);
+static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
+ enum machine_mode);
+static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
+ enum machine_mode);
+static int get_vsel_insn (enum machine_mode);
+static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
+static tree rs6000_stack_protect_fail (void);
+
+const int INSN_NOT_AVAILABLE = -1;
+static enum machine_mode rs6000_eh_return_filter_mode (void);
/* Hash table stuff for keeping track of TOC entries. */
@@ -464,7 +798,9 @@ char rs6000_reg_names[][8] =
"24", "25", "26", "27", "28", "29", "30", "31",
"vrsave", "vscr",
/* SPE registers. */
- "spe_acc", "spefscr"
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
};
#ifdef TARGET_REGNAMES
@@ -488,7 +824,9 @@ static const char alt_reg_names[][8] =
"%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
"vrsave", "vscr",
/* SPE registers. */
- "spe_acc", "spefscr"
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
};
#endif
@@ -501,10 +839,6 @@ static const char alt_reg_names[][8] =
/* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
-
-/* Return 1 for a symbol ref for a thread-local storage symbol. */
-#define RS6000_SYMBOL_REF_TLS_P(RTX) \
- (GET_CODE (RTX) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (RTX) != 0)
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
@@ -533,6 +867,10 @@ static const char alt_reg_names[][8] =
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
@@ -557,8 +895,6 @@ static const char alt_reg_names[][8] =
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
-#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
-#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE rs6000_use_dfa_pipeline_interface
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
@@ -568,7 +904,7 @@ static const char alt_reg_names[][8] =
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
-#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
+#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
@@ -576,6 +912,9 @@ static const char alt_reg_names[][8] =
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
+#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
+#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
+
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
@@ -590,9 +929,12 @@ static const char alt_reg_names[][8] =
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
-#define TARGET_BINDS_LOCAL_P rs6000_binds_local_p
+#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
+
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
@@ -602,13 +944,16 @@ static const char alt_reg_names[][8] =
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
+#undef TARGET_INVALID_WITHIN_DOLOOP
+#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop
+
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0
#undef TARGET_VECTOR_OPAQUE_P
-#define TARGET_VECTOR_OPAQUE_P is_ev64_opaque_type
+#define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
@@ -620,10 +965,6 @@ static const char alt_reg_names[][8] =
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
-/* Structure return values are passed as an extra parameter. */
-#undef TARGET_STRUCT_VALUE_RTX
-#define TARGET_STRUCT_VALUE_RTX hook_rtx_tree_int_null
-
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
@@ -637,12 +978,134 @@ static const char alt_reg_names[][8] =
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_tree_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
+
+#undef TARGET_EH_RETURN_FILTER_MODE
+#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+
+#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
+#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION rs6000_handle_option
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT)
+
+#undef TARGET_STACK_PROTECT_FAIL
+#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
+
+/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
+ The PowerPC architecture requires only weak consistency among
+ processors--that is, memory accesses between processors need not be
+ sequentially consistent and memory accesses among processors can occur
+ in any order. The ability to order memory accesses weakly provides
+ opportunities for more efficient use of the system bus. Unless a
+ dependency exists, the 604e allows read operations to precede store
+ operations. */
+#undef TARGET_RELAXED_ORDERING
+#define TARGET_RELAXED_ORDERING true
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
+#endif
+
+/* Use a 32-bit anchor range. This leads to sequences like:
+
+ addis tmp,anchor,high
+ add dest,tmp,low
+
+ where tmp itself acts as an anchor, and can be shared between
+ accesses to the same 64k page. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
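+/* -0x7fffffff - 1 is written instead of -0x80000000 because the literal
+   0x80000000 does not fit in a signed 32-bit int and would be treated as
+   unsigned, so negating it would not yield a negative value.  */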
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+static int
+rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* The GPRs can hold any mode, but values bigger than one register
+ cannot go past R31. */
+ if (INT_REGNO_P (regno))
+ return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
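+  /* For example, with 32-bit registers a DImode value needs two GPRs,
+     so it may start at r30 (occupying r30-r31) but not at r31.  */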
+
+ /* The float registers can only hold floating modes and DImode.
+ This also excludes decimal float modes. */
+ if (FP_REGNO_P (regno))
+ return
+ (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
+ && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD);
+
+ /* The CR register can only hold CC modes. */
+ if (CR_REGNO_P (regno))
+ return GET_MODE_CLASS (mode) == MODE_CC;
+
+ if (XER_REGNO_P (regno))
+ return mode == PSImode;
+
+  /* AltiVec modes can go only in AltiVec registers. */
+ if (ALTIVEC_REGNO_P (regno))
+ return ALTIVEC_VECTOR_MODE (mode);
+
+ /* ...but GPRs can hold SIMD data on the SPE in one register. */
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return 1;
+
+  /* We cannot put TImode anywhere except general registers, and it must
+     fit within the register set. */
+
+ return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
+}
+
+/* Initialize rs6000_hard_regno_mode_ok_p table. */
+static void
+rs6000_init_hard_regno_mode_ok (void)
+{
+ int r, m;
+
+ for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ if (rs6000_hard_regno_mode_ok (r, m))
+ rs6000_hard_regno_mode_ok_p[m][r] = true;
+}
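+
+/* Once the table is filled in, a HARD_REGNO_MODE_OK query reduces to a
+   lookup (sketch for illustration; the actual macro is expected to live
+   in rs6000.h):
+
+     rs6000_hard_regno_mode_ok_p[(MODE)][(REGNO)]
+
+   Note that the table is indexed by mode first, then register number.  */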
+
+/* If not otherwise specified by a target, make 'long double' equivalent to
+ 'double'. */
+
+#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
+#endif
+
/* Override command line options. Mostly we process the processor
type and sometimes adjust other TARGET_ options. */
@@ -663,7 +1126,7 @@ rs6000_override_options (const char *default_cpu)
/* This table occasionally claims that a processor does not support
a particular feature even though it does, but the feature is slower
than the alternative. Thus, it shouldn't be relied on as a
- complete description of the processor's support.
+ complete description of the processor's support.
Please keep this list in order, and don't forget to update the
documentation in invoke.texi when adding a new processor or
@@ -677,10 +1140,14 @@ rs6000_override_options (const char *default_cpu)
= {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"403", PROCESSOR_PPC403,
POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
- {"405", PROCESSOR_PPC405, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"405fp", PROCESSOR_PPC405, POWERPC_BASE_MASK},
- {"440", PROCESSOR_PPC440, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"440fp", PROCESSOR_PPC440, POWERPC_BASE_MASK},
+ {"405", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"405fp", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"440", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"440fp", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
{"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
{"601", PROCESSOR_PPC601,
MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
@@ -700,7 +1167,11 @@ rs6000_override_options (const char *default_cpu)
{"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"8540", PROCESSOR_PPC8540,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_STRICT_ALIGN},
+ /* 8548 has a dummy entry for now. */
+ {"8548", PROCESSOR_PPC8540,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_STRICT_ALIGN},
{"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"970", PROCESSOR_POWER4,
POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
@@ -718,17 +1189,25 @@ rs6000_override_options (const char *default_cpu)
{"power4", PROCESSOR_POWER4,
POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POWERPC64},
{"power5", PROCESSOR_POWER5,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POWERPC64},
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB},
+ {"power5+", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
+ {"power6", PROCESSOR_POWER5,
+ POWERPC_7400_MASK | MASK_POWERPC64 | MASK_MFCRF | MASK_POPCNTB
+ | MASK_FPRND},
{"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
{"powerpc64", PROCESSOR_POWERPC64,
- POWERPC_BASE_MASK | MASK_POWERPC64},
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
{"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
{"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
{"rios2", PROCESSOR_RIOS2,
MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
{"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
{"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"rs64a", PROCESSOR_RS64A, POWERPC_BASE_MASK | MASK_POWERPC64},
+ {"rs64", PROCESSOR_RS64A,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
};
const size_t ptt_size = ARRAY_SIZE (processor_target_table);
@@ -741,11 +1220,15 @@ rs6000_override_options (const char *default_cpu)
enum {
POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
- POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
+ POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
| MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
- | MASK_MFCRF)
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
+ | MASK_DLMZB)
};
- set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
+
+ rs6000_init_hard_regno_mode_ok ();
+
+ set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
if (OS_MISSING_POWERPC64)
set_masks &= ~MASK_POWERPC64;
@@ -807,14 +1290,14 @@ rs6000_override_options (const char *default_cpu)
{
target_flags &= ~MASK_MULTIPLE;
if ((target_flags_explicit & MASK_MULTIPLE) != 0)
- warning ("-mmultiple is not supported on little endian systems");
+ warning (0, "-mmultiple is not supported on little endian systems");
}
if (TARGET_STRING)
{
target_flags &= ~MASK_STRING;
if ((target_flags_explicit & MASK_STRING) != 0)
- warning ("-mstring is not supported on little endian systems");
+ warning (0, "-mstring is not supported on little endian systems");
}
}
@@ -840,44 +1323,40 @@ rs6000_override_options (const char *default_cpu)
else if (! strncmp (rs6000_traceback_name, "no", 2))
rs6000_traceback = traceback_none;
else
- error ("unknown -mtraceback arg `%s'; expecting `full', `partial' or `none'",
+ error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
rs6000_traceback_name);
}
- /* Set size of long double */
- rs6000_long_double_type_size = 64;
- if (rs6000_long_double_size_string)
- {
- char *tail;
- int size = strtol (rs6000_long_double_size_string, &tail, 10);
- if (*tail != '\0' || (size != 64 && size != 128))
- error ("Unknown switch -mlong-double-%s",
- rs6000_long_double_size_string);
- else
- rs6000_long_double_type_size = size;
- }
+ if (!rs6000_explicit_options.long_double)
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+
+#ifndef POWERPC_LINUX
+ if (!rs6000_explicit_options.ieee)
+ rs6000_ieeequad = 1;
+#endif
/* Set Altivec ABI as default for powerpc64 linux. */
if (TARGET_ELF && TARGET_64BIT)
{
rs6000_altivec_abi = 1;
- rs6000_altivec_vrsave = 1;
+ TARGET_ALTIVEC_VRSAVE = 1;
}
- /* Handle -mabi= options. */
- rs6000_parse_abi_options ();
-
- /* Handle -malign-XXXXX option. */
- rs6000_parse_alignment_option ();
+ /* Set the Darwin64 ABI as default for 64-bit Darwin. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ {
+ rs6000_darwin64_abi = 1;
+#if TARGET_MACHO
+ darwin_one_byte_bool = 1;
+#endif
+ /* Default to natural alignment, for better performance. */
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ }
- /* Handle generic -mFOO=YES/NO options. */
- rs6000_parse_yes_no_option ("vrsave", rs6000_altivec_vrsave_string,
- &rs6000_altivec_vrsave);
- rs6000_parse_yes_no_option ("isel", rs6000_isel_string,
- &rs6000_isel);
- rs6000_parse_yes_no_option ("spe", rs6000_spe_string, &rs6000_spe);
- rs6000_parse_yes_no_option ("float-gprs", rs6000_float_gprs_string,
- &rs6000_float_gprs);
+ /* Place FP constants in the constant pool instead of TOC
+ if section anchors enabled. */
+ if (flag_section_anchors)
+ TARGET_NO_FP_IN_TOC = 1;
/* Handle -mtls-size option. */
rs6000_parse_tls_size_option ();
@@ -888,37 +1367,35 @@ rs6000_override_options (const char *default_cpu)
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
+#ifdef SUB3TARGET_OVERRIDE_OPTIONS
+ SUB3TARGET_OVERRIDE_OPTIONS;
+#endif
if (TARGET_E500)
{
if (TARGET_ALTIVEC)
- error ("AltiVec and E500 instructions cannot coexist");
+ error ("AltiVec and E500 instructions cannot coexist");
/* The e500 does not have string instructions, and we set
MASK_STRING above when optimizing for size. */
if ((target_flags & MASK_STRING) != 0)
target_flags = target_flags & ~MASK_STRING;
-
- /* No SPE means 64-bit long doubles, even if an E500. */
- if (rs6000_spe_string != 0
- && !strcmp (rs6000_spe_string, "no"))
- rs6000_long_double_type_size = 64;
}
else if (rs6000_select[1].string != NULL)
{
/* For the powerpc-eabispe configuration, we set all these by
default, so let's unset them if we manually set another
CPU that is not the E500. */
- if (rs6000_abi_string == 0)
+ if (!rs6000_explicit_options.abi)
rs6000_spe_abi = 0;
- if (rs6000_spe_string == 0)
+ if (!rs6000_explicit_options.spe)
rs6000_spe = 0;
- if (rs6000_float_gprs_string == 0)
+ if (!rs6000_explicit_options.float_gprs)
rs6000_float_gprs = 0;
- if (rs6000_isel_string == 0)
+ if (!rs6000_explicit_options.isel)
rs6000_isel = 0;
- if (rs6000_long_double_size_string == 0)
- rs6000_long_double_type_size = 64;
+ if (!rs6000_explicit_options.long_double)
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
}
rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
@@ -926,70 +1403,41 @@ rs6000_override_options (const char *default_cpu)
rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
|| rs6000_cpu == PROCESSOR_POWER5);
- /* Handle -m(no-)longcall option. This is a bit of a cheap hack,
- using TARGET_OPTIONS to handle a toggle switch, but we're out of
- bits in target_flags so TARGET_SWITCHES cannot be used.
- Assumption here is that rs6000_longcall_switch points into the
- text of the complete option, rather than being a copy, so we can
- scan back for the presence or absence of the no- modifier. */
- if (rs6000_longcall_switch)
- {
- const char *base = rs6000_longcall_switch;
- while (base[-1] != 'm') base--;
-
- if (*rs6000_longcall_switch != '\0')
- error ("invalid option `%s'", base);
- rs6000_default_long_calls = (base[0] != 'n');
- }
-
- /* Handle -m(no-)warn-altivec-long similarly. */
- if (rs6000_warn_altivec_long_switch)
- {
- const char *base = rs6000_warn_altivec_long_switch;
- while (base[-1] != 'm') base--;
-
- if (*rs6000_warn_altivec_long_switch != '\0')
- error ("invalid option `%s'", base);
- rs6000_warn_altivec_long = (base[0] != 'n');
- }
-
- /* Handle -mprioritize-restricted-insns option. */
rs6000_sched_restricted_insns_priority
= (rs6000_sched_groups ? 1 : 0);
- if (rs6000_sched_restricted_insns_priority_str)
- rs6000_sched_restricted_insns_priority =
- atoi (rs6000_sched_restricted_insns_priority_str);
/* Handle -msched-costly-dep option. */
rs6000_sched_costly_dep
= (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
+
if (rs6000_sched_costly_dep_str)
{
- if (! strcmp (rs6000_sched_costly_dep_str, "no"))
- rs6000_sched_costly_dep = no_dep_costly;
+ if (! strcmp (rs6000_sched_costly_dep_str, "no"))
+ rs6000_sched_costly_dep = no_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
- rs6000_sched_costly_dep = all_deps_costly;
+ rs6000_sched_costly_dep = all_deps_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
- rs6000_sched_costly_dep = true_store_to_load_dep_costly;
+ rs6000_sched_costly_dep = true_store_to_load_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
- rs6000_sched_costly_dep = store_to_load_dep_costly;
- else
- rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
+ rs6000_sched_costly_dep = store_to_load_dep_costly;
+ else
+ rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
}
/* Handle -minsert-sched-nops option. */
rs6000_sched_insert_nops
= (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
+
if (rs6000_sched_insert_nops_str)
{
if (! strcmp (rs6000_sched_insert_nops_str, "no"))
- rs6000_sched_insert_nops = sched_finish_none;
+ rs6000_sched_insert_nops = sched_finish_none;
else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
- rs6000_sched_insert_nops = sched_finish_pad_groups;
+ rs6000_sched_insert_nops = sched_finish_pad_groups;
else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
- rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
else
- rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
+ rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
}
#ifdef TARGET_REGNAMES
@@ -999,25 +1447,16 @@ rs6000_override_options (const char *default_cpu)
memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif
- /* Set TARGET_AIX_STRUCT_RET last, after the ABI is determined.
+ /* Set aix_struct_return last, after the ABI is determined.
If -maix-struct-return or -msvr4-struct-return was explicitly
used, don't override with the ABI default. */
- if ((target_flags_explicit & MASK_AIX_STRUCT_RET) == 0)
- {
- if (DEFAULT_ABI == ABI_V4 && !DRAFT_V4_STRUCT_RET)
- target_flags = (target_flags & ~MASK_AIX_STRUCT_RET);
- else
- target_flags |= MASK_AIX_STRUCT_RET;
- }
+ if (!rs6000_explicit_options.aix_struct_ret)
+ aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
- if (TARGET_LONG_DOUBLE_128
- && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN))
+ if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
- /* Allocate an alias set for register saves & restores from stack. */
- rs6000_sr_alias_set = new_alias_set ();
-
- if (TARGET_TOC)
+ if (TARGET_TOC)
ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
/* We can only guarantee the availability of DI pseudo-ops when
@@ -1028,9 +1467,23 @@ rs6000_override_options (const char *default_cpu)
targetm.asm_out.unaligned_op.di = NULL;
}
- /* Set maximum branch target alignment at two instructions, eight bytes. */
- align_jumps_max_skip = 8;
- align_loops_max_skip = 8;
+ /* Set branch target alignment, if not optimizing for size. */
+ if (!optimize_size)
+ {
+ if (rs6000_sched_groups)
+ {
+ if (align_functions <= 0)
+ align_functions = 16;
+ if (align_jumps <= 0)
+ align_jumps = 16;
+ if (align_loops <= 0)
+ align_loops = 16;
+ }
+ if (align_jumps_max_skip <= 0)
+ align_jumps_max_skip = 15;
+ if (align_loops_max_skip <= 0)
+ align_loops_max_skip = 15;
+ }
/* Arrange to save and restore machine status around nested functions. */
init_machine_status = rs6000_init_machine_status;
@@ -1039,6 +1492,96 @@ rs6000_override_options (const char *default_cpu)
Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
if (DEFAULT_ABI != ABI_AIX)
targetm.calls.split_complex_arg = NULL;
+
+ /* Initialize rs6000_cost with the appropriate target costs. */
+ if (optimize_size)
+ rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
+ else
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_RIOS1:
+ rs6000_cost = &rios1_cost;
+ break;
+
+ case PROCESSOR_RIOS2:
+ rs6000_cost = &rios2_cost;
+ break;
+
+ case PROCESSOR_RS64A:
+ rs6000_cost = &rs64a_cost;
+ break;
+
+ case PROCESSOR_MPCCORE:
+ rs6000_cost = &mpccore_cost;
+ break;
+
+ case PROCESSOR_PPC403:
+ rs6000_cost = &ppc403_cost;
+ break;
+
+ case PROCESSOR_PPC405:
+ rs6000_cost = &ppc405_cost;
+ break;
+
+ case PROCESSOR_PPC440:
+ rs6000_cost = &ppc440_cost;
+ break;
+
+ case PROCESSOR_PPC601:
+ rs6000_cost = &ppc601_cost;
+ break;
+
+ case PROCESSOR_PPC603:
+ rs6000_cost = &ppc603_cost;
+ break;
+
+ case PROCESSOR_PPC604:
+ rs6000_cost = &ppc604_cost;
+ break;
+
+ case PROCESSOR_PPC604e:
+ rs6000_cost = &ppc604e_cost;
+ break;
+
+ case PROCESSOR_PPC620:
+ rs6000_cost = &ppc620_cost;
+ break;
+
+ case PROCESSOR_PPC630:
+ rs6000_cost = &ppc630_cost;
+ break;
+
+ case PROCESSOR_PPC750:
+ case PROCESSOR_PPC7400:
+ rs6000_cost = &ppc750_cost;
+ break;
+
+ case PROCESSOR_PPC7450:
+ rs6000_cost = &ppc7450_cost;
+ break;
+
+ case PROCESSOR_PPC8540:
+ rs6000_cost = &ppc8540_cost;
+ break;
+
+ case PROCESSOR_POWER4:
+ case PROCESSOR_POWER5:
+ rs6000_cost = &power4_cost;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement targetm.vectorize.builtin_mask_for_load. */
+static tree
+rs6000_builtin_mask_for_load (void)
+{
+ if (TARGET_ALTIVEC)
+ return altivec_builtin_mask_for_load;
+ else
+ return 0;
}
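
/* For background (an illustration, not part of this change): the
   vectorizer uses the builtin returned above to realign misaligned
   loads with the classic AltiVec sequence, roughly

     mask = vec_lvsl (0, addr);
     lo = vec_ld (0, addr);
     hi = vec_ld (15, addr);
     val = vec_perm (lo, hi, mask);

   where vec_lvsl computes the permute control vector for ADDR.  */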
/* Handle generic options of the form -mfoo=yes/no.
@@ -1059,48 +1602,6 @@ rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
error ("unknown -m%s= option specified: '%s'", name, value);
}
-/* Handle -mabi= options. */
-static void
-rs6000_parse_abi_options (void)
-{
- if (rs6000_abi_string == 0)
- return;
- else if (! strcmp (rs6000_abi_string, "altivec"))
- {
- rs6000_altivec_abi = 1;
- rs6000_spe_abi = 0;
- }
- else if (! strcmp (rs6000_abi_string, "no-altivec"))
- rs6000_altivec_abi = 0;
- else if (! strcmp (rs6000_abi_string, "spe"))
- {
- rs6000_spe_abi = 1;
- rs6000_altivec_abi = 0;
- if (!TARGET_SPE_ABI)
- error ("not configured for ABI: '%s'", rs6000_abi_string);
- }
-
- else if (! strcmp (rs6000_abi_string, "no-spe"))
- rs6000_spe_abi = 0;
- else
- error ("unknown ABI specified: '%s'", rs6000_abi_string);
-}
-
-/* Handle -malign-XXXXXX options. */
-static void
-rs6000_parse_alignment_option (void)
-{
- if (rs6000_alignment_string == 0)
- return;
- else if (! strcmp (rs6000_alignment_string, "power"))
- rs6000_alignment_flags = MASK_ALIGN_POWER;
- else if (! strcmp (rs6000_alignment_string, "natural"))
- rs6000_alignment_flags = MASK_ALIGN_NATURAL;
- else
- error ("unknown -malign-XXXXX option specified: '%s'",
- rs6000_alignment_string);
-}
-
/* Validate and record the size specified with the -mtls-size option. */
static void
@@ -1115,12 +1616,313 @@ rs6000_parse_tls_size_option (void)
else if (strcmp (rs6000_tls_size_string, "64") == 0)
rs6000_tls_size = 64;
else
- error ("bad value `%s' for -mtls-size switch", rs6000_tls_size_string);
+ error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
}
void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
+ if (DEFAULT_ABI == ABI_DARWIN)
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+
+ /* Double growth factor to counter reduced min jump length. */
+ set_param_value ("max-grow-copy-bb-insns", 16);
+
+  /* Enable section anchors by default.
+     Skip section anchors for Objective C and Objective C++ until the
+     front ends are fixed.  (lang_hooks.name is "GNU C",
+     "GNU Objective-C", and so on, so the fifth character distinguishes
+     the Objective languages.)  */
+ if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
+ flag_section_anchors = 1;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+rs6000_handle_option (size_t code, const char *arg, int value)
+{
+ switch (code)
+ {
+ case OPT_mno_power:
+ target_flags &= ~(MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ break;
+ case OPT_mno_powerpc:
+ target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ break;
+ case OPT_mfull_toc:
+ target_flags &= ~MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#ifdef TARGET_USES_SYSV4_OPT
+      /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc behave
+	 just the same as -mminimal-toc. */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#endif
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mtoc:
+ /* Make -mtoc behave like -mminimal-toc. */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ break;
+#endif
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix64:
+#else
+ case OPT_m64:
+#endif
+ target_flags |= MASK_POWERPC64 | MASK_POWERPC;
+ target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
+ target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
+ break;
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix32:
+#else
+ case OPT_m32:
+#endif
+ target_flags &= ~MASK_POWERPC64;
+ target_flags_explicit |= MASK_POWERPC64;
+ break;
+
+ case OPT_minsert_sched_nops_:
+ rs6000_sched_insert_nops_str = arg;
+ break;
+
+ case OPT_mminimal_toc:
+ if (value == 1)
+ {
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ }
+ break;
+
+ case OPT_mpower:
+ if (value == 1)
+ {
+ target_flags |= (MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpower2:
+ if (value == 1)
+ {
+ target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpowerpc_gpopt:
+ case OPT_mpowerpc_gfxopt:
+ if (value == 1)
+ {
+ target_flags |= MASK_POWERPC;
+ target_flags_explicit |= MASK_POWERPC;
+ }
+ break;
+
+ case OPT_maix_struct_return:
+ case OPT_msvr4_struct_return:
+ rs6000_explicit_options.aix_struct_ret = true;
+ break;
+
+ case OPT_mvrsave_:
+ rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
+ break;
+
+ case OPT_misel_:
+ rs6000_explicit_options.isel = true;
+ rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
+ break;
+
+ case OPT_mspe_:
+ rs6000_explicit_options.spe = true;
+ rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
+ /* No SPE means 64-bit long doubles, even if an E500. */
+ if (!rs6000_spe)
+ rs6000_long_double_type_size = 64;
+ break;
+
+ case OPT_mdebug_:
+ rs6000_debug_name = arg;
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mcall_:
+ rs6000_abi_name = arg;
+ break;
+
+ case OPT_msdata_:
+ rs6000_sdata_name = arg;
+ break;
+
+ case OPT_mtls_size_:
+ rs6000_tls_size_string = arg;
+ break;
+
+ case OPT_mrelocatable:
+ if (value == 1)
+ {
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ break;
+
+ case OPT_mrelocatable_lib:
+ if (value == 1)
+ {
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ else
+ {
+ target_flags &= ~MASK_RELOCATABLE;
+ target_flags_explicit |= MASK_RELOCATABLE;
+ }
+ break;
+#endif
+
+ case OPT_mabi_:
+ if (!strcmp (arg, "altivec"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_altivec_abi = 1;
+ rs6000_spe_abi = 0;
+ }
+ else if (! strcmp (arg, "no-altivec"))
+ {
+ /* ??? Don't set rs6000_explicit_options.abi here, to allow
+ the default for rs6000_spe_abi to be chosen later. */
+ rs6000_altivec_abi = 0;
+ }
+ else if (! strcmp (arg, "spe"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_spe_abi = 1;
+ rs6000_altivec_abi = 0;
+ if (!TARGET_SPE_ABI)
+ error ("not configured for ABI: '%s'", arg);
+ }
+ else if (! strcmp (arg, "no-spe"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_spe_abi = 0;
+ }
+
+      /* These are here for testing during development only; please do
+	 not document them in the manual. */
+ else if (! strcmp (arg, "d64"))
+ {
+ rs6000_darwin64_abi = 1;
+ warning (0, "Using darwin64 ABI");
+ }
+ else if (! strcmp (arg, "d32"))
+ {
+ rs6000_darwin64_abi = 0;
+ warning (0, "Using old darwin ABI");
+ }
+
+ else if (! strcmp (arg, "ibmlongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 0;
+ warning (0, "Using IBM extended precision long double");
+ }
+ else if (! strcmp (arg, "ieeelongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 1;
+ warning (0, "Using IEEE extended precision long double");
+ }
+
+ else
+ {
+ error ("unknown ABI specified: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mcpu_:
+ rs6000_select[1].string = arg;
+ break;
+
+ case OPT_mtune_:
+ rs6000_select[2].string = arg;
+ break;
+
+ case OPT_mtraceback_:
+ rs6000_traceback_name = arg;
+ break;
+
+ case OPT_mfloat_gprs_:
+ rs6000_explicit_options.float_gprs = true;
+ if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
+ rs6000_float_gprs = 1;
+ else if (! strcmp (arg, "double"))
+ rs6000_float_gprs = 2;
+ else if (! strcmp (arg, "no"))
+ rs6000_float_gprs = 0;
+ else
+ {
+ error ("invalid option for -mfloat-gprs: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mlong_double_:
+ rs6000_explicit_options.long_double = true;
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ if (value != 64 && value != 128)
+ {
+ error ("Unknown switch -mlong-double-%s", arg);
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ return false;
+ }
+ else
+ rs6000_long_double_type_size = value;
+ break;
+
+ case OPT_msched_costly_dep_:
+ rs6000_sched_costly_dep_str = arg;
+ break;
+
+ case OPT_malign_:
+ rs6000_explicit_options.alignment = true;
+ if (! strcmp (arg, "power"))
+ {
+ /* On 64-bit Darwin, power alignment is ABI-incompatible with
+ some C library functions, so warn about it. The flag may be
+ useful for performance studies from time to time though, so
+ don't disable it entirely. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ warning (0, "-malign-power is not supported for 64-bit Darwin;"
+ " it is incompatible with the installed C and C++ libraries");
+ rs6000_alignment_flags = MASK_ALIGN_POWER;
+ }
+ else if (! strcmp (arg, "natural"))
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ else
+ {
+ error ("unknown -malign-XXXXX option specified: '%s'", arg);
+ return false;
+ }
+ break;
+ }
+ return true;
}
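
/* Note on the hook contract (general to TARGET_HANDLE_OPTION, not
   specific to this change): returning false reports the argument as
   invalid, which is why the error paths above return false while
   recognized options fall through to return true.  */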
/* Do anything needed at the start of the asm file. */
@@ -1157,6 +1959,12 @@ rs6000_file_start (void)
}
}
+ if (PPC405_ERRATUM77)
+ {
+ fprintf (file, "%s PPC405CR_ERRATUM77", start);
+ start = "";
+ }
+
#ifdef USING_ELFOS_H
switch (rs6000_sdata)
{
@@ -1177,7 +1985,14 @@ rs6000_file_start (void)
if (*start == '\0')
putc ('\n', file);
}
+
+ if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
+ {
+ switch_to_section (toc_section);
+ switch_to_section (text_section);
+ }
}
+
/* Return nonzero if this function is known to have a null epilogue. */
@@ -1201,310 +2016,19 @@ direct_return (void)
return 0;
}
-/* Returns 1 always. */
-
-int
-any_operand (rtx op ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return 1;
-}
-
-/* Returns 1 if op is the count register. */
-int
-count_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (op) != REG)
- return 0;
-
- if (REGNO (op) == COUNT_REGISTER_REGNUM)
- return 1;
-
- if (REGNO (op) > FIRST_PSEUDO_REGISTER)
- return 1;
-
- return 0;
-}
-
-/* Returns 1 if op is an altivec register. */
-int
-altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
-
- return (register_operand (op, mode)
- && (GET_CODE (op) != REG
- || REGNO (op) > FIRST_PSEUDO_REGISTER
- || ALTIVEC_REGNO_P (REGNO (op))));
-}
-
-int
-xer_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (op) != REG)
- return 0;
-
- if (XER_REGNO_P (REGNO (op)))
- return 1;
-
- return 0;
-}
-
-/* Return 1 if OP is a signed 8-bit constant. Int multiplication
- by such constants completes more quickly. */
-
-int
-s8bit_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return ( GET_CODE (op) == CONST_INT
- && (INTVAL (op) >= -128 && INTVAL (op) <= 127));
-}
-
-/* Return 1 if OP is a constant that can fit in a D field. */
-
-int
-short_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (op), 'I'));
-}
-
-/* Similar for an unsigned D field. */
-
-int
-u_short_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (op) & GET_MODE_MASK (mode), 'K'));
-}
-
-/* Return 1 if OP is a CONST_INT that cannot fit in a signed D field. */
-
-int
-non_short_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == CONST_INT
- && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x8000) >= 0x10000);
-}
-
-/* Returns 1 if OP is a CONST_INT that is a positive value
- and an exact power of 2. */
-
-int
-exact_log2_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == CONST_INT
- && INTVAL (op) > 0
- && exact_log2 (INTVAL (op)) >= 0);
-}
-
-/* Returns 1 if OP is a register that is not special (i.e., not MQ,
- ctr, or lr). */
-
-int
-gpc_reg_operand (rtx op, enum machine_mode mode)
-{
- return (register_operand (op, mode)
- && (GET_CODE (op) != REG
- || (REGNO (op) >= ARG_POINTER_REGNUM
- && !XER_REGNO_P (REGNO (op)))
- || REGNO (op) < MQ_REGNO));
-}
-
-/* Returns 1 if OP is either a pseudo-register or a register denoting a
- CR field. */
-
-int
-cc_reg_operand (rtx op, enum machine_mode mode)
-{
- return (register_operand (op, mode)
- && (GET_CODE (op) != REG
- || REGNO (op) >= FIRST_PSEUDO_REGISTER
- || CR_REGNO_P (REGNO (op))));
-}
-
-/* Returns 1 if OP is either a pseudo-register or a register denoting a
- CR field that isn't CR0. */
-
-int
-cc_reg_not_cr0_operand (rtx op, enum machine_mode mode)
-{
- return (register_operand (op, mode)
- && (GET_CODE (op) != REG
- || REGNO (op) >= FIRST_PSEUDO_REGISTER
- || CR_REGNO_NOT_CR0_P (REGNO (op))));
-}
-
-/* Returns 1 if OP is either a constant integer valid for a D-field or
- a non-special register. If a register, it must be in the proper
- mode unless MODE is VOIDmode. */
-
-int
-reg_or_short_operand (rtx op, enum machine_mode mode)
-{
- return short_cint_operand (op, mode) || gpc_reg_operand (op, mode);
-}
-
-/* Similar, except check if the negation of the constant would be
- valid for a D-field. Don't allow a constant zero, since all the
- patterns that call this predicate use "addic r1,r2,-constant" on
- a constant value to set a carry when r2 is greater or equal to
- "constant". That doesn't work for zero. */
-
-int
-reg_or_neg_short_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return CONST_OK_FOR_LETTER_P (INTVAL (op), 'P') && INTVAL (op) != 0;
-
- return gpc_reg_operand (op, mode);
-}
-
-/* Returns 1 if OP is either a constant integer valid for a DS-field or
- a non-special register. If a register, it must be in the proper
- mode unless MODE is VOIDmode. */
-
-int
-reg_or_aligned_short_operand (rtx op, enum machine_mode mode)
-{
- if (gpc_reg_operand (op, mode))
- return 1;
- else if (short_cint_operand (op, mode) && !(INTVAL (op) & 3))
- return 1;
-
- return 0;
-}
-
-
-/* Return 1 if the operand is either a register or an integer whose
- high-order 16 bits are zero. */
-
-int
-reg_or_u_short_operand (rtx op, enum machine_mode mode)
-{
- return u_short_cint_operand (op, mode) || gpc_reg_operand (op, mode);
-}
-
-/* Return 1 if the operand is either a non-special register or ANY
- constant integer. */
-
-int
-reg_or_cint_operand (rtx op, enum machine_mode mode)
-{
- return (GET_CODE (op) == CONST_INT || gpc_reg_operand (op, mode));
-}
-
-/* Return 1 if the operand is either a non-special register or ANY
- 32-bit signed constant integer. */
-
-int
-reg_or_arith_cint_operand (rtx op, enum machine_mode mode)
-{
- return (gpc_reg_operand (op, mode)
- || (GET_CODE (op) == CONST_INT
-#if HOST_BITS_PER_WIDE_INT != 32
- && ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80000000)
- < (unsigned HOST_WIDE_INT) 0x100000000ll)
-#endif
- ));
-}
-
-/* Return 1 if the operand is either a non-special register or a 32-bit
- signed constant integer valid for 64-bit addition. */
-
-int
-reg_or_add_cint64_operand (rtx op, enum machine_mode mode)
-{
- return (gpc_reg_operand (op, mode)
- || (GET_CODE (op) == CONST_INT
-#if HOST_BITS_PER_WIDE_INT == 32
- && INTVAL (op) < 0x7fff8000
-#else
- && ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000)
- < 0x100000000ll)
-#endif
- ));
-}
-
-/* Return 1 if the operand is either a non-special register or a 32-bit
- signed constant integer valid for 64-bit subtraction. */
-
-int
-reg_or_sub_cint64_operand (rtx op, enum machine_mode mode)
-{
- return (gpc_reg_operand (op, mode)
- || (GET_CODE (op) == CONST_INT
-#if HOST_BITS_PER_WIDE_INT == 32
- && (- INTVAL (op)) < 0x7fff8000
-#else
- && ((unsigned HOST_WIDE_INT) ((- INTVAL (op)) + 0x80008000)
- < 0x100000000ll)
-#endif
- ));
-}
-
-/* Return 1 if the operand is either a non-special register or ANY
- 32-bit unsigned constant integer. */
-
-int
-reg_or_logical_cint_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- {
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
- {
- if (GET_MODE_BITSIZE (mode) <= 32)
- abort ();
-
- if (INTVAL (op) < 0)
- return 0;
- }
-
- return ((INTVAL (op) & GET_MODE_MASK (mode)
- & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0);
- }
- else if (GET_CODE (op) == CONST_DOUBLE)
- {
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- || mode != DImode)
- abort ();
-
- return CONST_DOUBLE_HIGH (op) == 0;
- }
- else
- return gpc_reg_operand (op, mode);
-}
-
-/* Return 1 if the operand is an operand that can be loaded via the GOT. */
-
-int
-got_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == SYMBOL_REF
- || GET_CODE (op) == CONST
- || GET_CODE (op) == LABEL_REF);
-}
-
-/* Return 1 if the operand is a simple reference that can be loaded via
- the GOT (labels involving addition aren't allowed). */
-
-int
-got_no_const_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF);
-}
-
/* Return the number of instructions it takes to form a constant in an
integer register. */
-static int
+int
num_insns_constant_wide (HOST_WIDE_INT value)
{
/* signed constant loadable with {cal|addi} */
- if (CONST_OK_FOR_LETTER_P (value, 'I'))
+  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
return 1;
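+  /* The unsigned comparison above is the usual range-check idiom:
+     value + 0x8000 lands in [0, 0x10000) exactly when VALUE fits in a
+     signed 16-bit immediate, i.e. -0x8000 <= VALUE <= 0x7fff.  */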
/* constant loadable with {cau|addis} */
- else if (CONST_OK_FOR_LETTER_P (value, 'L'))
+ else if ((value & 0xffff) == 0
+ && (value >> 31 == -1 || value >> 31 == 0))
return 1;
#if HOST_BITS_PER_WIDE_INT == 64
@@ -1533,285 +2057,228 @@ num_insns_constant_wide (HOST_WIDE_INT value)
int
num_insns_constant (rtx op, enum machine_mode mode)
{
- if (GET_CODE (op) == CONST_INT)
+ HOST_WIDE_INT low, high;
+
+ switch (GET_CODE (op))
{
+ case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
&& mask64_operand (op, mode))
- return 2;
+ return 2;
else
#endif
return num_insns_constant_wide (INTVAL (op));
- }
-
- else if (GET_CODE (op) == CONST_DOUBLE && mode == SFmode)
- {
- long l;
- REAL_VALUE_TYPE rv;
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_SINGLE (rv, l);
- return num_insns_constant_wide ((HOST_WIDE_INT) l);
- }
+ case CONST_DOUBLE:
+ if (mode == SFmode)
+ {
+ long l;
+ REAL_VALUE_TYPE rv;
- else if (GET_CODE (op) == CONST_DOUBLE)
- {
- HOST_WIDE_INT low;
- HOST_WIDE_INT high;
- long l[2];
- REAL_VALUE_TYPE rv;
- int endian = (WORDS_BIG_ENDIAN == 0);
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ return num_insns_constant_wide ((HOST_WIDE_INT) l);
+ }
- if (mode == VOIDmode || mode == DImode)
- {
- high = CONST_DOUBLE_HIGH (op);
- low = CONST_DOUBLE_LOW (op);
- }
- else
- {
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
- high = l[endian];
- low = l[1 - endian];
- }
+ if (mode == VOIDmode || mode == DImode)
+ {
+ high = CONST_DOUBLE_HIGH (op);
+ low = CONST_DOUBLE_LOW (op);
+ }
+ else
+ {
+ long l[2];
+ REAL_VALUE_TYPE rv;
- if (TARGET_32BIT)
- return (num_insns_constant_wide (low)
- + num_insns_constant_wide (high));
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+ high = l[WORDS_BIG_ENDIAN == 0];
+ low = l[WORDS_BIG_ENDIAN != 0];
+ }
- else
- {
- if (high == 0 && low >= 0)
- return num_insns_constant_wide (low);
+ if (TARGET_32BIT)
+ return (num_insns_constant_wide (low)
+ + num_insns_constant_wide (high));
+ else
+ {
+ if ((high == 0 && low >= 0)
+ || (high == -1 && low < 0))
+ return num_insns_constant_wide (low);
- else if (high == -1 && low < 0)
- return num_insns_constant_wide (low);
+ else if (mask64_operand (op, mode))
+ return 2;
- else if (mask64_operand (op, mode))
- return 2;
+ else if (low == 0)
+ return num_insns_constant_wide (high) + 1;
- else if (low == 0)
- return num_insns_constant_wide (high) + 1;
+ else
+ return (num_insns_constant_wide (high)
+ + num_insns_constant_wide (low) + 1);
+ }
- else
- return (num_insns_constant_wide (high)
- + num_insns_constant_wide (low) + 1);
- }
+ default:
+ gcc_unreachable ();
}
-
- else
- abort ();
}
-/* Return 1 if the operand is a CONST_DOUBLE and it can be put into a
- register with one instruction per word. We only do this if we can
- safely read CONST_DOUBLE_{LOW,HIGH}. */
+/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
+ If the mode of OP is MODE_VECTOR_INT, this simply returns the
+ corresponding element of the vector, but for V4SFmode and V2SFmode,
+ the corresponding "float" is interpreted as an SImode integer. */
-int
-easy_fp_constant (rtx op, enum machine_mode mode)
+static HOST_WIDE_INT
+const_vector_elt_as_int (rtx op, unsigned int elt)
{
- if (GET_CODE (op) != CONST_DOUBLE
- || GET_MODE (op) != mode
- || (GET_MODE_CLASS (mode) != MODE_FLOAT && mode != DImode))
- return 0;
-
- /* Consider all constants with -msoft-float to be easy. */
- if ((TARGET_SOFT_FLOAT || !TARGET_FPRS)
- && mode != DImode)
- return 1;
-
- /* If we are using V.4 style PIC, consider all constants to be hard. */
- if (flag_pic && DEFAULT_ABI == ABI_V4)
- return 0;
+ rtx tmp = CONST_VECTOR_ELT (op, elt);
+ if (GET_MODE (op) == V4SFmode
+ || GET_MODE (op) == V2SFmode)
+ tmp = gen_lowpart (SImode, tmp);
+ return INTVAL (tmp);
+}
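+
+/* For example (illustration only): if the last element of a V4SFmode
+   CONST_VECTOR is 1.0f, the value returned for it is 0x3f800000, the
+   IEEE single-precision bit pattern read back as an SImode integer.  */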
-#ifdef TARGET_RELOCATABLE
- /* Similarly if we are using -mrelocatable, consider all constants
- to be hard. */
- if (TARGET_RELOCATABLE)
- return 0;
-#endif
+/* Return true if OP can be synthesized with a particular vspltisb, vspltish
+ or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
+ depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
+ all items are set to the same value and contain COPIES replicas of the
+   vsplt's operand; if STEP > 1, one in every STEP elements is set to the
+   vsplt's operand and the others are set to the value of the operand's
+   most significant bit.  */
- if (mode == TFmode)
- {
- long k[4];
- REAL_VALUE_TYPE rv;
+static bool
+vspltis_constant (rtx op, unsigned step, unsigned copies)
+{
+ enum machine_mode mode = GET_MODE (op);
+ enum machine_mode inner = GET_MODE_INNER (mode);
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+ unsigned i;
+ unsigned nunits = GET_MODE_NUNITS (mode);
+ unsigned bitsize = GET_MODE_BITSIZE (inner);
+ unsigned mask = GET_MODE_MASK (inner);
- return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
- && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1
- && num_insns_constant_wide ((HOST_WIDE_INT) k[2]) == 1
- && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1);
- }
+ HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
+ HOST_WIDE_INT splat_val = val;
+ HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
- else if (mode == DFmode)
+ /* Construct the value to be splatted, if possible. If not, return 0. */
+ for (i = 2; i <= copies; i *= 2)
{
- long k[2];
- REAL_VALUE_TYPE rv;
-
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
-
- return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
- && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1);
+ HOST_WIDE_INT small_val;
+ bitsize /= 2;
+ small_val = splat_val >> bitsize;
+ mask >>= bitsize;
+ if (splat_val != ((small_val << bitsize) | (small_val & mask)))
+ return false;
+ splat_val = small_val;
}
- else if (mode == SFmode)
- {
- long l;
- REAL_VALUE_TYPE rv;
-
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_SINGLE (rv, l);
-
- return num_insns_constant_wide (l) == 1;
- }
+ /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
+ if (EASY_VECTOR_15 (splat_val))
+ ;
- else if (mode == DImode)
- return ((TARGET_POWERPC64
- && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0)
- || (num_insns_constant (op, DImode) <= 2));
+  /* Also check if we can splat, and then add the result to itself.  Do so
+     if the value is positive, or if the splat instruction is using OP's
+     mode; for splat_val < 0, the splat and the add should use the same
+     mode.  */
+ else if (EASY_VECTOR_15_ADD_SELF (splat_val)
+ && (splat_val >= 0 || (step == 1 && copies == 1)))
+ ;
- else if (mode == SImode)
- return 1;
else
- abort ();
-}
-
-/* Returns the constant for the splat instruction, if it exists. */
+ return false;
-static int
-easy_vector_splat_const (int cst, enum machine_mode mode)
-{
- switch (mode)
+ /* Check if VAL is present in every STEP-th element, and the
+ other elements are filled with its most significant bit. */
+ for (i = 0; i < nunits - 1; ++i)
{
- case V4SImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
- break;
- cst = cst >> 16;
- case V8HImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- if ((cst & 0xff) != ((cst >> 8) & 0xff))
- break;
- cst = cst >> 8;
- case V16QImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- default:
- break;
+ HOST_WIDE_INT desired_val;
+ if (((i + 1) & (step - 1)) == 0)
+ desired_val = val;
+ else
+ desired_val = msb_val;
+
+ if (desired_val != const_vector_elt_as_int (op, i))
+ return false;
}
- return 0;
+
+ return true;
}
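
/* Worked example (illustrative, not from the change itself): the V8HImode
   constant { 0, 5, 0, 5, 0, 5, 0, 5 } passes vspltis_constant with
   STEP == 2 and COPIES == 1: every second element holds the value 5 and
   the rest hold its most significant bit (0), so each 32-bit word is
   0x00000005 and the vector can be generated with "vspltisw %0,5".  */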
-/* Return nonzero if all elements of a vector have the same value. */
+/* Return true if OP is of the given MODE and can be synthesized
+ with a vspltisb, vspltish or vspltisw. */
-static int
-easy_vector_same (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+bool
+easy_altivec_constant (rtx op, enum machine_mode mode)
{
- int units, i, cst;
-
- units = CONST_VECTOR_NUNITS (op);
-
- cst = INTVAL (CONST_VECTOR_ELT (op, 0));
- for (i = 1; i < units; ++i)
- if (INTVAL (CONST_VECTOR_ELT (op, i)) != cst)
- break;
- if (i == units && easy_vector_splat_const (cst, mode))
- return 1;
- return 0;
-}
+ unsigned step, copies;
-/* Return 1 if the operand is a CONST_INT and can be put into a
- register without using memory. */
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+ else if (mode != GET_MODE (op))
+ return false;
-int
-easy_vector_constant (rtx op, enum machine_mode mode)
-{
- int cst, cst2;
+ /* Start with a vspltisw. */
+ step = GET_MODE_NUNITS (mode) / 4;
+ copies = 1;
- if (GET_CODE (op) != CONST_VECTOR
- || (!TARGET_ALTIVEC
- && !TARGET_SPE))
- return 0;
+ if (vspltis_constant (op, step, copies))
+ return true;
- if (zero_constant (op, mode)
- && ((TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
- || (TARGET_SPE && SPE_VECTOR_MODE (mode))))
- return 1;
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
- if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
- return 0;
+ if (vspltis_constant (op, step, copies))
+ return true;
- if (TARGET_SPE && mode == V1DImode)
- return 0;
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
- cst = INTVAL (CONST_VECTOR_ELT (op, 0));
- cst2 = INTVAL (CONST_VECTOR_ELT (op, 1));
-
- /* Limit SPE vectors to 15 bits signed. These we can generate with:
- li r0, CONSTANT1
- evmergelo r0, r0, r0
- li r0, CONSTANT2
-
- I don't know how efficient it would be to allow bigger constants,
- considering we'll have an extra 'ori' for every 'li'. I doubt 5
- instructions is better than a 64-bit memory load, but I don't
- have the e500 timing specs. */
- if (TARGET_SPE && mode == V2SImode
- && cst >= -0x7fff && cst <= 0x7fff
- && cst2 >= -0x7fff && cst2 <= 0x7fff)
- return 1;
+ if (vspltis_constant (op, step, copies))
+ return true;
- if (TARGET_ALTIVEC
- && easy_vector_same (op, mode))
- {
- cst = easy_vector_splat_const (cst, mode);
- if (EASY_VECTOR_15_ADD_SELF (cst)
- || EASY_VECTOR_15 (cst))
- return 1;
- }
- return 0;
+ return false;
}
-/* Same as easy_vector_constant but only for EASY_VECTOR_15_ADD_SELF. */
+/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
+ result is OP. Abort if it is not possible. */
-int
-easy_vector_constant_add_self (rtx op, enum machine_mode mode)
+rtx
+gen_easy_altivec_constant (rtx op)
{
- int cst;
- if (TARGET_ALTIVEC
- && GET_CODE (op) == CONST_VECTOR
- && easy_vector_same (op, mode))
- {
- cst = easy_vector_splat_const (INTVAL (CONST_VECTOR_ELT (op, 0)), mode);
- if (EASY_VECTOR_15_ADD_SELF (cst))
- return 1;
- }
- return 0;
-}
+ enum machine_mode mode = GET_MODE (op);
+ int nunits = GET_MODE_NUNITS (mode);
+ rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+ unsigned step = nunits / 4;
+ unsigned copies = 1;
-/* Generate easy_vector_constant out of an easy_vector_constant_add_self. */
+ /* Start with a vspltisw. */
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
-rtx
-gen_easy_vector_constant_add_self (rtx op)
-{
- int i, units;
- rtvec v;
- units = GET_MODE_NUNITS (GET_MODE (op));
- v = rtvec_alloc (units);
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
- for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) =
- GEN_INT (INTVAL (CONST_VECTOR_ELT (op, i)) >> 1);
- return gen_rtx_raw_CONST_VECTOR (GET_MODE (op), v);
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+
+ gcc_unreachable ();
}
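/* Note on the helper above: the splat value is taken from the last
   vector element and narrowed with gen_lowpart, so a V8HImode
   constant of eight 5s yields (vec_duplicate:V8HI (const_int 5)),
   which the vspltish pattern matches.  Callers are expected to have
   checked easy_altivec_constant first; otherwise the final
   gcc_unreachable fires.  */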
const char *
@@ -1823,354 +2290,214 @@ output_vec_const_move (rtx *operands)
dest = operands[0];
vec = operands[1];
-
- cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
- cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
mode = GET_MODE (dest);
if (TARGET_ALTIVEC)
{
+ rtx splat_vec;
if (zero_constant (vec, mode))
return "vxor %0,%0,%0";
- else if (easy_vector_constant (vec, mode))
- {
- operands[1] = GEN_INT (cst);
- switch (mode)
- {
- case V4SImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltisw %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
- cst = cst >> 16;
- case V8HImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltish %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
- cst = cst >> 8;
- case V16QImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltisb %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
- default:
- abort ();
- }
- }
- else
- abort ();
- }
-
- if (TARGET_SPE)
- {
- /* Vector constant 0 is handled as a splitter of V2SI, and in the
- pattern of V1DI, V4HI, and V2SF.
-
- FIXME: We should probably return # and add post reload
- splitters for these, but this way is so easy ;-).
- */
- operands[1] = GEN_INT (cst);
- operands[2] = GEN_INT (cst2);
- if (cst == cst2)
- return "li %0,%1\n\tevmergelo %0,%0,%0";
- else
- return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
- }
-
- abort ();
-}
-/* Return 1 if the operand is the constant 0. This works for scalars
- as well as vectors. */
-int
-zero_constant (rtx op, enum machine_mode mode)
-{
- return op == CONST0_RTX (mode);
-}
+ splat_vec = gen_easy_altivec_constant (vec);
+ gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
+ operands[1] = XEXP (splat_vec, 0);
+ if (!EASY_VECTOR_15 (INTVAL (operands[1])))
+ return "#";
-/* Return 1 if the operand is 0.0. */
-int
-zero_fp_constant (rtx op, enum machine_mode mode)
-{
- return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
-}
-
-/* Return 1 if the operand is in volatile memory. Note that during
- the RTL generation phase, memory_operand does not return TRUE for
- volatile memory references. So this function allows us to
- recognize volatile references where its safe. */
-
-int
-volatile_mem_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) != MEM)
- return 0;
-
- if (!MEM_VOLATILE_P (op))
- return 0;
-
- if (mode != GET_MODE (op))
- return 0;
-
- if (reload_completed)
- return memory_operand (op, mode);
-
- if (reload_in_progress)
- return strict_memory_address_p (mode, XEXP (op, 0));
-
- return memory_address_p (mode, XEXP (op, 0));
-}
-
-/* Return 1 if the operand is an offsettable memory operand. */
-
-int
-offsettable_mem_operand (rtx op, enum machine_mode mode)
-{
- return ((GET_CODE (op) == MEM)
- && offsettable_address_p (reload_completed || reload_in_progress,
- mode, XEXP (op, 0)));
-}
-
-/* Return 1 if the operand is either an easy FP constant (see above) or
- memory. */
+ switch (GET_MODE (splat_vec))
+ {
+ case V4SImode:
+ return "vspltisw %0,%1";
-int
-mem_or_easy_const_operand (rtx op, enum machine_mode mode)
-{
- return memory_operand (op, mode) || easy_fp_constant (op, mode);
-}
+ case V8HImode:
+ return "vspltish %0,%1";
-/* Return 1 if the operand is either a non-special register or an item
- that can be used as the operand of a `mode' add insn. */
+ case V16QImode:
+ return "vspltisb %0,%1";
-int
-add_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
- || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
+ default:
+ gcc_unreachable ();
+ }
+ }
- return gpc_reg_operand (op, mode);
-}
+ gcc_assert (TARGET_SPE);
-/* Return 1 if OP is a constant but not a valid add_operand. */
+ /* Vector constant 0 is handled as a splitter of V2SI, and in the
+ pattern of V1DI, V4HI, and V2SF.
-int
-non_add_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == CONST_INT
- && !CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
- && !CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
+ FIXME: We should probably return # and add post reload
+ splitters for these, but this way is so easy ;-). */
+ cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
+ operands[1] = CONST_VECTOR_ELT (vec, 0);
+ operands[2] = CONST_VECTOR_ELT (vec, 1);
+ if (cst == cst2)
+ return "li %0,%1\n\tevmergelo %0,%0,%0";
+ else
+ return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
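/* A rough example of the SPE branch above (the register number is
   made up): moving the V2SImode constant {5, 5} into r9 prints

       li r9,5
       evmergelo r9,r9,r9

   while {5, 7} appends a final "li r9,7", leaving 5 in the high word
   from the merge and 7 in the low word.  */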
-/* Return 1 if the operand is a non-special register or a constant that
- can be used as the operand of an OR or XOR insn on the RS/6000. */
-
-int
-logical_operand (rtx op, enum machine_mode mode)
-{
- HOST_WIDE_INT opl, oph;
+/* Initialize vector TARGET to VALS. */
- if (gpc_reg_operand (op, mode))
- return 1;
+void
+rs6000_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true, all_const_zero = true;
+ rtx x, mem;
+ int i;
- if (GET_CODE (op) == CONST_INT)
+ for (i = 0; i < n_elts; ++i)
{
- opl = INTVAL (op) & GET_MODE_MASK (mode);
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ ++n_var, one_var = i;
+ else if (x != CONST0_RTX (inner_mode))
+ all_const_zero = false;
-#if HOST_BITS_PER_WIDE_INT <= 32
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0)
- return 0;
-#endif
+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
}
- else if (GET_CODE (op) == CONST_DOUBLE)
- {
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- abort ();
- opl = CONST_DOUBLE_LOW (op);
- oph = CONST_DOUBLE_HIGH (op);
- if (oph != 0)
- return 0;
+ if (n_var == 0)
+ {
+ if (mode != V4SFmode && all_const_zero)
+ {
+ /* Zero register. */
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_XOR (mode, target, target)));
+ return;
+ }
+ else if (mode != V4SFmode && easy_vector_constant (vals, mode))
+ {
+ /* Splat immediate. */
+ emit_insn (gen_rtx_SET (VOIDmode, target, vals));
+ return;
+ }
+ else if (all_same)
+ ; /* Splat vector element. */
+ else
+ {
+ /* Load from constant pool. */
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
}
- else
- return 0;
-
- return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0
- || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0);
-}
-
-/* Return 1 if C is a constant that is not a logical operand (as
- above), but could be split into one. */
-
-int
-non_logical_cint_operand (rtx op, enum machine_mode mode)
-{
- return ((GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE)
- && ! logical_operand (op, mode)
- && reg_or_logical_cint_operand (op, mode));
-}
-
-/* Return 1 if C is a constant that can be encoded in a 32-bit mask on the
- RS/6000. It is if there are no more than two 1->0 or 0->1 transitions.
- Reject all ones and all zeros, since these should have been optimized
- away and confuse the making of MB and ME. */
-int
-mask_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- HOST_WIDE_INT c, lsb;
-
- if (GET_CODE (op) != CONST_INT)
- return 0;
-
- c = INTVAL (op);
-
- /* Fail in 64-bit mode if the mask wraps around because the upper
- 32-bits of the mask will all be 1s, contrary to GCC's internal view. */
- if (TARGET_POWERPC64 && (c & 0x80000001) == 0x80000001)
- return 0;
-
- /* We don't change the number of transitions by inverting,
- so make sure we start with the LS bit zero. */
- if (c & 1)
- c = ~c;
-
- /* Reject all zeros or all ones. */
- if (c == 0)
- return 0;
-
- /* Find the first transition. */
- lsb = c & -c;
-
- /* Invert to look for a second transition. */
- c = ~c;
-
- /* Erase first transition. */
- c &= -lsb;
-
- /* Find the second transition (if any). */
- lsb = c & -c;
-
- /* Match if all the bits above are 1's (or c is zero). */
- return c == -lsb;
-}
-
-/* Return 1 for the PowerPC64 rlwinm corner case. */
-
-int
-mask_operand_wrap (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- HOST_WIDE_INT c, lsb;
-
- if (GET_CODE (op) != CONST_INT)
- return 0;
-
- c = INTVAL (op);
-
- if ((c & 0x80000001) != 0x80000001)
- return 0;
-
- c = ~c;
- if (c == 0)
- return 0;
-
- lsb = c & -c;
- c = ~c;
- c &= -lsb;
- lsb = c & -c;
- return c == -lsb;
-}
-
-/* Return 1 if the operand is a constant that is a PowerPC64 mask.
- It is if there are no more than one 1->0 or 0->1 transitions.
- Reject all zeros, since zero should have been optimized away and
- confuses the making of MB and ME. */
+ /* Store value to stack temp. Load vector element. Splat. */
+ if (all_same)
+ {
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
+ XVECEXP (vals, 0, 0));
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ target, mem),
+ x)));
+ x = gen_rtx_VEC_SELECT (inner_mode, target,
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (1, const0_rtx)));
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_VEC_DUPLICATE (mode, x)));
+ return;
+ }
-int
-mask64_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (op) == CONST_INT)
+ /* One field is non-constant. Load constant then overwrite
+ varying field. */
+ if (n_var == 1)
{
- HOST_WIDE_INT c, lsb;
+ rtx copy = copy_rtx (vals);
- c = INTVAL (op);
+ /* Load constant part of vector, substitute neighboring value for
+ varying element. */
+ XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
+ rs6000_expand_vector_init (target, copy);
- /* Reject all zeros. */
- if (c == 0)
- return 0;
-
- /* We don't change the number of transitions by inverting,
- so make sure we start with the LS bit zero. */
- if (c & 1)
- c = ~c;
-
- /* Find the transition, and check that all bits above are 1's. */
- lsb = c & -c;
-
- /* Match if all the bits above are 1's (or c is zero). */
- return c == -lsb;
+ /* Insert variable. */
+ rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
+ return;
}
- return 0;
+
+ /* Construct the vector in memory one field at a time
+ and load the whole vector. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner_mode,
+ i * GET_MODE_SIZE (inner_mode)),
+ XVECEXP (vals, 0, i));
+ emit_move_insn (target, mem);
}
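/* A worked example of the one-variable case above: initializing
   {x, 1, 2, 3}, where x is the only non-constant element, first
   materializes {1, 1, 2, 3} through the constant paths (the
   neighboring constant is copied over the varying slot) and then
   calls rs6000_expand_vector_set to overwrite element 0 with x.
   Fully variable vectors fall through to the final loop, which
   assembles the value in a stack temporary one element at a time.  */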
-/* Like mask64_operand, but allow up to three transitions. This
- predicate is used by insn patterns that generate two rldicl or
- rldicr machine insns. */
+/* Set field ELT of TARGET to VAL. */
-int
-mask64_2_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+void
+rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
- if (GET_CODE (op) == CONST_INT)
- {
- HOST_WIDE_INT c, lsb;
-
- c = INTVAL (op);
-
- /* Disallow all zeros. */
- if (c == 0)
- return 0;
-
- /* We don't change the number of transitions by inverting,
- so make sure we start with the LS bit zero. */
- if (c & 1)
- c = ~c;
-
- /* Find the first transition. */
- lsb = c & -c;
-
- /* Invert to look for a second transition. */
- c = ~c;
-
- /* Erase first transition. */
- c &= -lsb;
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx reg = gen_reg_rtx (mode);
+ rtx mask, mem, x;
+ int width = GET_MODE_SIZE (inner_mode);
+ int i;
- /* Find the second transition. */
- lsb = c & -c;
+ /* Load single variable value. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ reg, mem),
+ x)));
+
+ /* Linear sequence. */
+ mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
+ for (i = 0; i < 16; ++i)
+ XVECEXP (mask, 0, i) = GEN_INT (i);
+
+ /* Set permute mask to insert element into target. */
+ for (i = 0; i < width; ++i)
+ XVECEXP (mask, 0, elt*width + i)
+ = GEN_INT (i + 0x10);
+ x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
+ x = gen_rtx_UNSPEC (mode,
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERM);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+}
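/* A sketch of the permute mask built above: for V4SImode with
   ELT == 1 (width 4) the byte selector becomes

       { 0, 1, 2, 3,  16, 17, 18, 19,  8, 9, 10, 11,  12, 13, 14, 15 }

   so the vperm keeps bytes 0-3 and 8-15 of TARGET and pulls bytes 4-7
   from REG, whose first word was filled by the lvewx-style load
   (UNSPEC_LVE) of the new value.  */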
+
+/* Extract field ELT from VEC into TARGET. */
- /* Invert to look for a third transition. */
- c = ~c;
+void
+rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
+{
+ enum machine_mode mode = GET_MODE (vec);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx mem, x;
- /* Erase second transition. */
- c &= -lsb;
+ /* Allocate mode-sized buffer. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
- /* Find the third transition (if any). */
- lsb = c & -c;
+ /* Add offset to field within buffer matching vector element. */
+ mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
- /* Match if all the bits above are 1's (or c is zero). */
- return c == -lsb;
- }
- return 0;
+ /* Store single field into mode-sized buffer. */
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_STVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ mem, vec),
+ x)));
+ emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
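/* A sketch of the extract above: for V4SImode with ELT == 2 the
   buffer address is biased by 8 bytes, so the stvewx-style store
   (UNSPEC_STVE) deposits exactly element 2 of VEC at that address,
   and the final scalar move loads it back without spilling the whole
   vector.  */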
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
@@ -2182,8 +2509,7 @@ build_mask64_2_operands (rtx in, rtx *out)
unsigned HOST_WIDE_INT c, lsb, m1, m2;
int shift;
- if (GET_CODE (in) != CONST_INT)
- abort ();
+ gcc_assert (GET_CODE (in) == CONST_INT);
c = INTVAL (in);
if (c & 1)
@@ -2239,203 +2565,73 @@ build_mask64_2_operands (rtx in, rtx *out)
#else
(void)in;
(void)out;
- abort ();
+ gcc_unreachable ();
#endif
}
-/* Return 1 if the operand is either a non-special register or a constant
- that can be used as the operand of a PowerPC64 logical AND insn. */
-
-int
-and64_operand (rtx op, enum machine_mode mode)
-{
- if (fixed_regs[CR0_REGNO]) /* CR0 not available, don't do andi./andis. */
- return (gpc_reg_operand (op, mode) || mask64_operand (op, mode));
-
- return (logical_operand (op, mode) || mask64_operand (op, mode));
-}
-
-/* Like the above, but also match constants that can be implemented
- with two rldicl or rldicr insns. */
-
-int
-and64_2_operand (rtx op, enum machine_mode mode)
-{
- if (fixed_regs[CR0_REGNO]) /* CR0 not available, don't do andi./andis. */
- return gpc_reg_operand (op, mode) || mask64_2_operand (op, mode);
-
- return logical_operand (op, mode) || mask64_2_operand (op, mode);
-}
-
-/* Return 1 if the operand is either a non-special register or a
- constant that can be used as the operand of an RS/6000 logical AND insn. */
-
-int
-and_operand (rtx op, enum machine_mode mode)
-{
- if (fixed_regs[CR0_REGNO]) /* CR0 not available, don't do andi./andis. */
- return (gpc_reg_operand (op, mode) || mask_operand (op, mode));
-
- return (logical_operand (op, mode) || mask_operand (op, mode));
-}
-
-/* Return 1 if the operand is a general register or memory operand. */
-
-int
-reg_or_mem_operand (rtx op, enum machine_mode mode)
-{
- return (gpc_reg_operand (op, mode)
- || memory_operand (op, mode)
- || macho_lo_sum_memory_operand (op, mode)
- || volatile_mem_operand (op, mode));
-}
-
-/* Return 1 if the operand is a general register or memory operand without
- pre_inc or pre_dec which produces invalid form of PowerPC lwa
- instruction. */
-
-int
-lwa_operand (rtx op, enum machine_mode mode)
-{
- rtx inner = op;
-
- if (reload_completed && GET_CODE (inner) == SUBREG)
- inner = SUBREG_REG (inner);
-
- return gpc_reg_operand (inner, mode)
- || (memory_operand (inner, mode)
- && GET_CODE (XEXP (inner, 0)) != PRE_INC
- && GET_CODE (XEXP (inner, 0)) != PRE_DEC
- && (GET_CODE (XEXP (inner, 0)) != PLUS
- || GET_CODE (XEXP (XEXP (inner, 0), 1)) != CONST_INT
- || INTVAL (XEXP (XEXP (inner, 0), 1)) % 4 == 0));
-}
-
-/* Return 1 if the operand, used inside a MEM, is a SYMBOL_REF. */
-
-int
-symbol_ref_operand (rtx op, enum machine_mode mode)
-{
- if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
-
- return (GET_CODE (op) == SYMBOL_REF
- && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op)));
-}
-
-/* Return 1 if the operand, used inside a MEM, is a valid first argument
- to CALL. This is a SYMBOL_REF, a pseudo-register, LR or CTR. */
-
-int
-call_operand (rtx op, enum machine_mode mode)
-{
- if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
-
- return (GET_CODE (op) == SYMBOL_REF
- || (GET_CODE (op) == REG
- && (REGNO (op) == LINK_REGISTER_REGNUM
- || REGNO (op) == COUNT_REGISTER_REGNUM
- || REGNO (op) >= FIRST_PSEUDO_REGISTER)));
-}
-
-/* Return 1 if the operand is a SYMBOL_REF for a function known to be in
- this file. */
-
-int
-current_file_function_operand (rtx op,
- enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return (GET_CODE (op) == SYMBOL_REF
- && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))
- && (SYMBOL_REF_LOCAL_P (op)
- || (op == XEXP (DECL_RTL (current_function_decl), 0))));
-}
-
-/* Return 1 if this operand is a valid input for a move insn. */
+/* Return TRUE if OP is an invalid SUBREG operation on the e500. */
-int
-input_operand (rtx op, enum machine_mode mode)
+bool
+invalid_e500_subreg (rtx op, enum machine_mode mode)
{
- /* Memory is always valid. */
- if (memory_operand (op, mode))
- return 1;
-
- /* Only a tiny bit of handling for CONSTANT_P_RTX is necessary. */
- if (GET_CODE (op) == CONSTANT_P_RTX)
- return 1;
-
- /* For floating-point, easy constants are valid. */
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
- && CONSTANT_P (op)
- && easy_fp_constant (op, mode))
- return 1;
-
- /* Allow any integer constant. */
- if (GET_MODE_CLASS (mode) == MODE_INT
- && (GET_CODE (op) == CONST_INT
- || GET_CODE (op) == CONST_DOUBLE))
- return 1;
-
- /* Allow easy vector constants. */
- if (GET_CODE (op) == CONST_VECTOR
- && easy_vector_constant (op, mode))
- return 1;
-
- /* For floating-point or multi-word mode, the only remaining valid type
- is a register. */
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
- || GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- return register_operand (op, mode);
-
- /* The only cases left are integral modes one word or smaller (we
- do not get called for MODE_CC values). These can be in any
- register. */
- if (register_operand (op, mode))
- return 1;
-
- /* A SYMBOL_REF referring to the TOC is valid. */
- if (legitimate_constant_pool_address_p (op))
- return 1;
+ if (TARGET_E500_DOUBLE)
+ {
+ /* Reject (subreg:SI (reg:DF)). */
+ if (GET_CODE (op) == SUBREG
+ && mode == SImode
+ && REG_P (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == DFmode)
+ return true;
- /* A constant pool expression (relative to the TOC) is valid */
- if (toc_relative_expr_p (op))
- return 1;
+ /* Reject (subreg:DF (reg:DI)). */
+ if (GET_CODE (op) == SUBREG
+ && mode == DFmode
+ && REG_P (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == DImode)
+ return true;
+ }
- /* V.4 allows SYMBOL_REFs and CONSTs that are in the small data region
- to be valid. */
- if (DEFAULT_ABI == ABI_V4
- && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)
- && small_data_operand (op, Pmode))
- return 1;
+ if (TARGET_SPE
+ && GET_CODE (op) == SUBREG
+ && mode == SImode
+ && REG_P (SUBREG_REG (op))
+ && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
+ return true;
- return 0;
+ return false;
}
-
 /* Darwin and AIX increase natural record alignment to doubleword if the first
field is an FP double while the FP fields remain word aligned. */
unsigned int
-rs6000_special_round_type_align (tree type, int computed, int specified)
+rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
{
+ unsigned int align = MAX (computed, specified);
tree field = TYPE_FIELDS (type);
- /* Skip all the static variables only if ABI is greater than
- 1 or equal to 0. */
- while (field != NULL && TREE_CODE (field) == VAR_DECL)
+  /* Skip all non-field decls.  */
+ while (field != NULL && TREE_CODE (field) != FIELD_DECL)
field = TREE_CHAIN (field);
- if (field == NULL || field == type || DECL_MODE (field) != DFmode)
- return MAX (computed, specified);
+ if (field != NULL && field != type)
+ {
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
- return MAX (MAX (computed, specified), 64);
+ if (type != error_mark_node && TYPE_MODE (type) == DFmode)
+ align = MAX (align, 64);
+ }
+
+ return align;
}
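/* A sketch of the effect on the affected ABIs (hypothetical types):

     struct s { double d; int i; };    first field is DFmode, so the
                                       record gets >= 64-bit alignment
     struct t { int i; double d; };    alignment stays as computed

   An array first field such as double d[2] is handled by the
   ARRAY_TYPE walk above and aligns the record the same way.  */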
/* Return 1 for an operand in small memory on V.4/eabi. */
int
-small_data_operand (rtx op ATTRIBUTE_UNUSED,
+small_data_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
@@ -2462,10 +2658,10 @@ small_data_operand (rtx op ATTRIBUTE_UNUSED,
HOST_WIDE_INT summand;
/* We have to be careful here, because it is the referenced address
- that must be 32k from _SDA_BASE_, not just the symbol. */
+ that must be 32k from _SDA_BASE_, not just the symbol. */
summand = INTVAL (XEXP (sum, 1));
if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
- return 0;
+ return 0;
sym_ref = XEXP (sum, 0);
}
@@ -2476,27 +2672,6 @@ small_data_operand (rtx op ATTRIBUTE_UNUSED,
#endif
}
-/* Return true, if operand is a memory operand and has a
- displacement divisible by 4. */
-
-int
-word_offset_memref_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- rtx addr;
- int off = 0;
-
- if (!memory_operand (op, mode))
- return 0;
-
- addr = XEXP (op, 0);
- if (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 0)) == REG
- && GET_CODE (XEXP (addr, 1)) == CONST_INT)
- off = INTVAL (XEXP (addr, 1));
-
- return (off % 4) == 0;
-}
-
/* Return true if either operand is a general purpose register. */
bool
@@ -2509,10 +2684,10 @@ gpr_or_gpr_p (rtx op0, rtx op1)
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
-static int
-constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
+static int
+constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
{
- switch (GET_CODE(op))
+ switch (GET_CODE (op))
{
case SYMBOL_REF:
if (RS6000_SYMBOL_REF_TLS_P (op))
@@ -2555,7 +2730,7 @@ constant_pool_expr_p (rtx op)
return constant_pool_expr_1 (op, &have_sym, &have_toc) && have_sym;
}
-static bool
+bool
toc_relative_expr_p (rtx op)
{
int have_sym = 0;
@@ -2563,9 +2738,6 @@ toc_relative_expr_p (rtx op)
return constant_pool_expr_1 (op, &have_sym, &have_toc) && have_toc;
}
-/* SPE offset addressing is limited to 5-bits worth of double words. */
-#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
-
bool
legitimate_constant_pool_address_p (rtx x)
{
@@ -2585,8 +2757,11 @@ legitimate_small_data_p (enum machine_mode mode, rtx x)
&& small_data_operand (x, mode));
}
-static bool
-legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
+/* SPE offset addressing is limited to 5-bits worth of double words. */
+#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
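/* That is, the accepted offsets are exactly 0, 8, 16, ..., 248: a
   5-bit instruction field scaled by the 8-byte doubleword, as in the
   evldd/evstdd memory forms.  */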
+
+bool
+rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
{
unsigned HOST_WIDE_INT offset, extra;
@@ -2596,6 +2771,8 @@ legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
return false;
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
+ if (legitimate_constant_pool_address_p (x))
+ return true;
if (GET_CODE (XEXP (x, 1)) != CONST_INT)
return false;
@@ -2607,10 +2784,10 @@ legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
case V8HImode:
case V4SFmode:
case V4SImode:
- /* AltiVec vector modes. Only reg+reg addressing is valid here,
- which leaves the only valid constant offset of zero, which by
- canonicalization rules is also invalid. */
- return false;
+ /* AltiVec vector modes. Only reg+reg addressing is valid and
+ constant offset zero should not occur due to canonicalization.
+ Allow any offset when not strict before reload. */
+ return !strict;
case V4HImode:
case V2SImode:
@@ -2620,7 +2797,18 @@ legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
return SPE_CONST_OFFSET_OK (offset);
case DFmode:
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
+
case DImode:
+ /* On e500v2, we may have:
+
+ (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
+
+ Which gets addressed with evldd instructions. */
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
+
if (mode == DFmode || !TARGET_POWERPC64)
extra = 4;
else if (offset & 3)
@@ -2652,31 +2840,38 @@ legitimate_indexed_address_p (rtx x, int strict)
if (GET_CODE (x) != PLUS)
return false;
+
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
- if (!REG_P (op0) || !REG_P (op1))
- return false;
+ /* Recognize the rtl generated by reload which we know will later be
+ replaced with proper base and index regs. */
+ if (!strict
+ && reload_in_progress
+ && (REG_P (op0) || GET_CODE (op0) == PLUS)
+ && REG_P (op1))
+ return true;
- return ((INT_REG_OK_FOR_BASE_P (op0, strict)
- && INT_REG_OK_FOR_INDEX_P (op1, strict))
- || (INT_REG_OK_FOR_BASE_P (op1, strict)
- && INT_REG_OK_FOR_INDEX_P (op0, strict)));
+ return (REG_P (op0) && REG_P (op1)
+ && ((INT_REG_OK_FOR_BASE_P (op0, strict)
+ && INT_REG_OK_FOR_INDEX_P (op1, strict))
+ || (INT_REG_OK_FOR_BASE_P (op1, strict)
+ && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
-static inline bool
+inline bool
legitimate_indirect_address_p (rtx x, int strict)
{
return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
-static bool
+bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
- if (!TARGET_MACHO || !flag_pic
- || mode != SImode || GET_CODE(x) != MEM)
- return false;
- x = XEXP (x, 0);
+ if (!TARGET_MACHO || !flag_pic
+ || mode != SImode || GET_CODE (x) != MEM)
+ return false;
+ x = XEXP (x, 0);
if (GET_CODE (x) != LO_SUM)
return false;
@@ -2698,6 +2893,9 @@ legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
return false;
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == DImode))
+ return false;
x = XEXP (x, 1);
if (TARGET_ELF || TARGET_MACHO)
@@ -2708,8 +2906,9 @@ legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
return false;
if (GET_MODE_NUNITS (mode) != 1)
return false;
- if (GET_MODE_BITSIZE (mode) > 32
- && !(TARGET_HARD_FLOAT && TARGET_FPRS && mode == DFmode))
+ if (GET_MODE_BITSIZE (mode) > 64
+ || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
+ && !(TARGET_HARD_FLOAT && TARGET_FPRS && mode == DFmode)))
return false;
return CONSTANT_P (x);
@@ -2753,11 +2952,11 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
return rs6000_legitimize_tls_address (x, model);
}
- if (GET_CODE (x) == PLUS
+ if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000)
- {
+ {
HOST_WIDE_INT high_int, low_int;
rtx sum;
low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
@@ -2766,13 +2965,14 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
GEN_INT (high_int)), 0);
return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
}
- else if (GET_CODE (x) == PLUS
+ else if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) != CONST_INT
&& GET_MODE_NUNITS (mode) == 1
&& ((TARGET_HARD_FLOAT && TARGET_FPRS)
|| TARGET_POWERPC64
- || (mode != DFmode && mode != TFmode))
+ || (((mode != DImode && mode != DFmode) || TARGET_E500_DOUBLE)
+ && mode != TFmode))
&& (TARGET_POWERPC64 || mode != DImode)
&& mode != TImode)
{
@@ -2791,24 +2991,28 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
reg = force_reg (Pmode, x);
return reg;
}
- else if (SPE_VECTOR_MODE (mode))
+ else if (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DImode)))
{
+ if (mode == DImode)
+ return NULL_RTX;
/* We accept [reg + reg] and [reg + OFFSET]. */
if (GET_CODE (x) == PLUS)
- {
- rtx op1 = XEXP (x, 0);
- rtx op2 = XEXP (x, 1);
+ {
+ rtx op1 = XEXP (x, 0);
+ rtx op2 = XEXP (x, 1);
- op1 = force_reg (Pmode, op1);
+ op1 = force_reg (Pmode, op1);
- if (GET_CODE (op2) != REG
- && (GET_CODE (op2) != CONST_INT
- || !SPE_CONST_OFFSET_OK (INTVAL (op2))))
- op2 = force_reg (Pmode, op2);
+ if (GET_CODE (op2) != REG
+ && (GET_CODE (op2) != CONST_INT
+ || !SPE_CONST_OFFSET_OK (INTVAL (op2))))
+ op2 = force_reg (Pmode, op2);
- return gen_rtx_PLUS (Pmode, op1, op2);
- }
+ return gen_rtx_PLUS (Pmode, op1, op2);
+ }
return force_reg (Pmode, x);
}
@@ -2817,7 +3021,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
&& TARGET_NO_TOC
&& ! flag_pic
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
&& (GET_MODE_BITSIZE (mode) <= 32
@@ -2833,17 +3037,17 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
&& ! MACHO_DYNAMIC_NO_PIC_P
#endif
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& ((TARGET_HARD_FLOAT && TARGET_FPRS) || mode != DFmode)
- && mode != DImode
+ && mode != DImode
&& mode != TImode)
{
rtx reg = gen_reg_rtx (Pmode);
emit_insn (gen_macho_high (reg, x));
return gen_rtx_LO_SUM (Pmode, reg, x);
}
- else if (TARGET_TOC
+ else if (TARGET_TOC
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
{
@@ -2853,10 +3057,10 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
return NULL_RTX;
}
-/* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
+/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
We need to emit DTP-relative relocations. */
-void
+static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
switch (size)
@@ -2868,7 +3072,7 @@ rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
fputs (DOUBLE_INT_ASM_OP, file);
break;
default:
- abort ();
+ gcc_unreachable ();
}
output_addr_const (file, x);
fputs ("@dtprel+0x8000", file);
@@ -2897,7 +3101,7 @@ rs6000_got_sym (void)
rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
- }
+ }
return rs6000_got_symbol;
}
@@ -2953,6 +3157,11 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
rtx r3, got, tga, tmp1, tmp2, eqv;
+ /* We currently use relocations like @got@tlsgd for tls, which
+ means the linker will handle allocation of tls entries, placing
+ them in the .got section. So use a pointer to the .got section,
+ not one to secondary TOC sections used by 64-bit -mminimal-toc,
+ or to secondary GOT sections used by 32-bit -fPIC. */
if (TARGET_64BIT)
got = gen_rtx_REG (Pmode, 2);
else
@@ -2974,8 +3183,7 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
tmp1 = gen_reg_rtx (Pmode);
tmp2 = gen_reg_rtx (Pmode);
tmp3 = gen_reg_rtx (Pmode);
- mem = gen_rtx_MEM (Pmode, tmp1);
- RTX_UNCHANGING_P (mem) = 1;
+ mem = gen_const_mem (Pmode, tmp1);
first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, gsym));
emit_move_insn (tmp1, tempLR);
@@ -3083,15 +3291,6 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
return dest;
}
-/* Return 1 if X is a SYMBOL_REF for a TLS symbol. This is used in
- instruction definitions. */
-
-int
-rs6000_tls_symbol_ref (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return RS6000_SYMBOL_REF_TLS_P (x);
-}
-
/* Return 1 if X contains a thread-local symbol. */
bool
@@ -3106,7 +3305,7 @@ rs6000_tls_referenced_p (rtx x)
/* Return 1 if *X is a thread-local symbol. This is the same as
rs6000_tls_symbol_ref except for the type of the unused argument. */
-static inline int
+static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
return RS6000_SYMBOL_REF_TLS_P (*x);
@@ -3133,10 +3332,11 @@ rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
The Darwin code is inside #if TARGET_MACHO because only then is
machopic_function_base_name() defined. */
rtx
-rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
- int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win)
+rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED, int *win)
{
- /* We must recognize output that we have already generated ourselves. */
+ /* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
@@ -3144,8 +3344,8 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
*win = 1;
return x;
}
@@ -3165,8 +3365,8 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
/* Result of previous invocation of this function on Darwin
floating point constant. */
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
*win = 1;
return x;
}
@@ -3180,6 +3380,7 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
&& REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& (INTVAL (XEXP (x, 1)) & 3) != 0
+ && !ALTIVEC_VECTOR_MODE (mode)
&& GET_MODE_SIZE (mode) >= UNITS_PER_WORD
&& TARGET_POWERPC64)
{
@@ -3197,56 +3398,69 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
&& REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& !SPE_VECTOR_MODE (mode)
+ && !(TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DImode))
&& !ALTIVEC_VECTOR_MODE (mode))
{
HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT high
- = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
/* Check for 32-bit overflow. */
if (high + low != val)
- {
+ {
*win = 0;
return x;
}
/* Reload the high part into a base reg; leave the low part
- in the mem directly. */
+ in the mem directly. */
x = gen_rtx_PLUS (GET_MODE (x),
- gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
- GEN_INT (high)),
- GEN_INT (low));
+ gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (high)),
+ GEN_INT (low));
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
*win = 1;
return x;
}
-#if TARGET_MACHO
if (GET_CODE (x) == SYMBOL_REF
- && DEFAULT_ABI == ABI_DARWIN
&& !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+#if TARGET_MACHO
+ && DEFAULT_ABI == ABI_DARWIN
&& (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
- /* Don't do this for TFmode, since the result isn't offsettable. */
- && mode != TFmode)
+#else
+ && DEFAULT_ABI == ABI_V4
+ && !flag_pic
+#endif
+ /* Don't do this for TFmode, since the result isn't offsettable.
+ The same goes for DImode without 64-bit gprs and DFmode
+ without fprs. */
+ && mode != TFmode
+ && (mode != DImode || TARGET_POWERPC64)
+ && (mode != DFmode || TARGET_POWERPC64
+ || (TARGET_FPRS && TARGET_HARD_FLOAT)))
{
+#if TARGET_MACHO
if (flag_pic)
{
rtx offset = gen_rtx_CONST (Pmode,
gen_rtx_MINUS (Pmode, x,
- gen_rtx_SYMBOL_REF (Pmode,
- machopic_function_base_name ())));
+ machopic_function_base_sym ()));
x = gen_rtx_LO_SUM (GET_MODE (x),
gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
gen_rtx_HIGH (Pmode, offset)), offset);
}
else
+#endif
x = gen_rtx_LO_SUM (GET_MODE (x),
- gen_rtx_HIGH (Pmode, x), x);
+ gen_rtx_HIGH (Pmode, x), x);
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
@@ -3254,19 +3468,35 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
*win = 1;
return x;
}
-#endif
+
+ /* Reload an offset address wrapped by an AND that represents the
+ masking of the lower bits. Strip the outer AND and let reload
+ convert the offset address into an indirect address. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ {
+ x = XEXP (x, 0);
+ *win = 1;
+ return x;
+ }
if (TARGET_TOC
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
{
- (x) = create_TOC_reference (x);
+ x = create_TOC_reference (x);
*win = 1;
return x;
}
*win = 0;
return x;
-}
+}
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
@@ -3282,12 +3512,20 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
word aligned.
For modes spanning multiple registers (DFmode in 32-bit GPRs,
- 32-bit DImode, TImode), indexed addressing cannot be used because
+ 32-bit DImode, TImode, TFmode), indexed addressing cannot be used because
adjacent memory cells are accessed by adding word-sized offsets
during assembly output. */
int
rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
{
+ /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ x = XEXP (x, 0);
+
if (RS6000_SYMBOL_REF_TLS_P (x))
return 0;
if (legitimate_indirect_address_p (x, reg_ok_strict))
@@ -3295,6 +3533,9 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
&& !ALTIVEC_VECTOR_MODE (mode)
&& !SPE_VECTOR_MODE (mode)
+ && mode != TFmode
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == DImode))
&& TARGET_UPDATE
&& legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
return 1;
@@ -3310,12 +3551,13 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
|| XEXP (x, 0) == arg_pointer_rtx)
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
return 1;
- if (legitimate_offset_address_p (mode, x, reg_ok_strict))
+ if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
return 1;
if (mode != TImode
+ && mode != TFmode
&& ((TARGET_HARD_FLOAT && TARGET_FPRS)
|| TARGET_POWERPC64
- || (mode != DFmode && mode != TFmode))
+ || ((mode != DFmode || TARGET_E500_DOUBLE) && mode != TFmode))
&& (TARGET_POWERPC64 || mode != DImode)
&& legitimate_indexed_address_p (x, reg_ok_strict))
return 1;
@@ -3332,7 +3574,7 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
??? Except that due to conceptual problems in offsettable_address_p
we can't really report the problems of integral offsets. So leave
- this assuming that the adjustable offset must be valid for the
+ this assuming that the adjustable offset must be valid for the
sub-words of a TFmode operand, which is what we had before. */
bool
@@ -3361,6 +3603,142 @@ rs6000_mode_dependent_address (rtx addr)
return false;
}
+
+/* More elaborate version of recog's offsettable_memref_p predicate
+ that works around the ??? note of rs6000_mode_dependent_address.
+ In particular it accepts
+
+ (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
+
+   in 32-bit mode, which the recog predicate rejects.  */
+
+bool
+rs6000_offsettable_memref_p (rtx op)
+{
+ if (!MEM_P (op))
+ return false;
+
+ /* First mimic offsettable_memref_p. */
+ if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
+ return true;
+
+ /* offsettable_address_p invokes rs6000_mode_dependent_address, but
+ the latter predicate knows nothing about the mode of the memory
+ reference and, therefore, assumes that it is the largest supported
+ mode (TFmode). As a consequence, legitimate offsettable memory
+ references are rejected. rs6000_legitimate_offset_address_p contains
+ the correct logic for the PLUS case of rs6000_mode_dependent_address. */
+ return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ For the SPE, GPRs are 64 bits but only 32 bits are visible in
+ scalar instructions. The upper 32 bits are only available to the
+ SIMD instructions.
+
+ POWER and PowerPC GPRs hold 32 bits worth;
+   PowerPC64 GPRs and FPRs hold 64 bits worth.  */
+
+int
+rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+ if (FP_REGNO_P (regno))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
+
+ if (ALTIVEC_REGNO_P (regno))
+ return
+ (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;
+
+ /* The value returned for SCmode in the E500 double case is 2 for
+ ABI compatibility; storing an SCmode value in a single register
+ would require function_arg and rs6000_spe_function_arg to handle
+ SCmode so as to pass the value correctly in a pair of
+ registers. */
+ if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode)
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
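/* Illustrations of the above, following from the unit sizes: DFmode
   needs two 32-bit GPRs but a single 8-byte FPR; a V4SImode value
   fits one 16-byte AltiVec register; and an 8-byte SPE vector such as
   V2SImode fits a single 64-bit GPR.  */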
+
+/* Change register usage conditional on target flags. */
+void
+rs6000_conditional_register_usage (void)
+{
+ int i;
+
+ /* Set MQ register fixed (already call_used) if not POWER
+ architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
+ be allocated. */
+ if (! TARGET_POWER)
+ fixed_regs[64] = 1;
+
+ /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
+ if (TARGET_64BIT)
+ fixed_regs[13] = call_used_regs[13]
+ = call_really_used_regs[13] = 1;
+
+ /* Conditionally disable FPRs. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ for (i = 32; i < 64; i++)
+ fixed_regs[i] = call_used_regs[i]
+ = call_really_used_regs[i] = 1;
+
+ /* The TOC register is not killed across calls in a way that is
+ visible to the compiler. */
+ if (DEFAULT_ABI == ABI_AIX)
+ call_really_used_regs[2] = 0;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 2)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 1)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_DARWIN
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_TOC && TARGET_MINIMAL_TOC)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_ALTIVEC)
+ global_regs[VSCR_REGNO] = 1;
+
+ if (TARGET_SPE)
+ {
+ global_regs[SPEFSCR_REGNO] = 1;
+ fixed_regs[FIXED_SCRATCH]
+ = call_used_regs[FIXED_SCRATCH]
+ = call_really_used_regs[FIXED_SCRATCH] = 1;
+ }
+
+ if (! TARGET_ALTIVEC)
+ {
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
+ call_really_used_regs[VRSAVE_REGNO] = 1;
+ }
+
+ if (TARGET_ALTIVEC_ABI)
+ for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
+ call_used_regs[i] = call_really_used_regs[i] = 1;
+}
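/* For instance, -msoft-float retires all of regs 32-63 (the FPRs) in
   the loop above, and with the AltiVec ABI v0-v19 become
   call-clobbered while v20-v31 stay callee-saved; VRSAVE is marked
   call-clobbered whenever AltiVec itself is disabled.  */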
/* Try to output insns to set TARGET equal to the constant C if it can
be done in less than N insns. Do all computations in MODE.
@@ -3369,21 +3747,22 @@ rs6000_mode_dependent_address (rtx addr)
   insns, zero is returned and no insns are emitted.  */
rtx
-rs6000_emit_set_const (rtx dest, enum machine_mode mode,
+rs6000_emit_set_const (rtx dest, enum machine_mode mode,
rtx source, int n ATTRIBUTE_UNUSED)
{
rtx result, insn, set;
HOST_WIDE_INT c0, c1;
- if (mode == QImode || mode == HImode)
+ switch (mode)
{
+ case QImode:
+ case HImode:
if (dest == NULL)
- dest = gen_reg_rtx (mode);
+ dest = gen_reg_rtx (mode);
emit_insn (gen_rtx_SET (VOIDmode, dest, source));
return dest;
- }
- else if (mode == SImode)
- {
+
+ case SImode:
result = no_new_pseudos ? dest : gen_reg_rtx (SImode);
emit_insn (gen_rtx_SET (VOIDmode, result,
@@ -3393,16 +3772,17 @@ rs6000_emit_set_const (rtx dest, enum machine_mode mode,
gen_rtx_IOR (SImode, result,
GEN_INT (INTVAL (source) & 0xffff))));
result = dest;
- }
- else if (mode == DImode)
- {
- if (GET_CODE (source) == CONST_INT)
+ break;
+
+ case DImode:
+ switch (GET_CODE (source))
{
+ case CONST_INT:
c0 = INTVAL (source);
c1 = -(c0 < 0);
- }
- else if (GET_CODE (source) == CONST_DOUBLE)
- {
+ break;
+
+ case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
c0 = CONST_DOUBLE_LOW (source);
c1 = -(c0 < 0);
@@ -3410,14 +3790,18 @@ rs6000_emit_set_const (rtx dest, enum machine_mode mode,
c0 = CONST_DOUBLE_LOW (source);
c1 = CONST_DOUBLE_HIGH (source);
#endif
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
result = rs6000_emit_set_long_const (dest, c0, c1);
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
insn = get_last_insn ();
set = single_set (insn);
@@ -3457,7 +3841,7 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
ud3 = c2 & 0xffff;
ud4 = (c2 & 0xffff0000) >> 16;
- if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
{
if (ud1 & 0x8000)
@@ -3466,22 +3850,22 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
emit_move_insn (dest, GEN_INT (ud1));
}
- else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
{
if (ud2 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud2 << 16));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
|| (ud4 == 0 && ! (ud3 & 0x8000)))
{
if (ud3 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud3 << 16));
@@ -3492,10 +3876,10 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else
+ else
{
if (ud4 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud4 << 16));
@@ -3505,8 +3889,8 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
if (ud2 != 0)
- emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
- GEN_INT (ud2 << 16)));
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
+ GEN_INT (ud2 << 16)));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
@@ -3514,6 +3898,29 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
return dest;
}
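/* A rough worked example for rs6000_emit_set_long_const: splitting
   0x1234567876543210 into the 16-bit chunks ud4..ud1 = 0x1234,
   0x5678, 0x7654, 0x3210 reaches the general case above, which emits
   approximately

       lis   rD,0x1234
       ori   rD,rD,0x5678
       sldi  rD,rD,32
       oris  rD,rD,0x7654
       ori   rD,rD,0x3210

   Constants whose upper chunks are all zeros or all ones take the
   earlier, shorter paths.  */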
+/* Helper for the following. Get rid of [r+r] memory refs
+ in cases where it won't work (TImode, TFmode). */
+
+static void
+rs6000_eliminate_indexed_memrefs (rtx operands[2])
+{
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (XEXP (operands[0], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
+ && ! reload_in_progress)
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_addr_to_reg (XEXP (operands[0], 0)));
+
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
+ && ! reload_in_progress)
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_addr_to_reg (XEXP (operands[1], 0)));
+}
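/* E.g. a TImode move from (mem (plus r3 r4)) gets the sum copied into
   a fresh base register first, so the multi-word move code below only
   ever sees (mem (reg)) or constant-pool addresses and can offset
   each word freely.  */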
+
/* Emit a move from SOURCE to DEST in mode MODE. */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
@@ -3521,7 +3928,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
rtx operands[2];
operands[0] = dest;
operands[1] = source;
-
+
/* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
if (GET_CODE (operands[1]) == CONST_DOUBLE
&& ! FLOAT_MODE_P (mode)
@@ -3532,13 +3939,12 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
to a CONST_INT. */
operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
}
- if (GET_CODE (operands[1]) == CONST_DOUBLE
- && ! FLOAT_MODE_P (mode)
- && ((CONST_DOUBLE_HIGH (operands[1]) == 0
- && CONST_DOUBLE_LOW (operands[1]) >= 0)
- || (CONST_DOUBLE_HIGH (operands[1]) == -1
- && CONST_DOUBLE_LOW (operands[1]) < 0)))
- abort ();
+ gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
+ || FLOAT_MODE_P (mode)
+ || ((CONST_DOUBLE_HIGH (operands[1]) != 0
+ || CONST_DOUBLE_LOW (operands[1]) < 0)
+ && (CONST_DOUBLE_HIGH (operands[1]) != -1
+ || CONST_DOUBLE_LOW (operands[1]) >= 0)));
/* Check if GCC is setting up a block move that will end up using FP
registers as temporaries. We must make sure this is acceptable. */
@@ -3550,7 +3956,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
&& ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
? 32 : MEM_ALIGN (operands[0])))
|| SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
- ? 32
+ ? 32
: MEM_ALIGN (operands[1]))))
&& ! MEM_VOLATILE_P (operands [0])
&& ! MEM_VOLATILE_P (operands [1]))
@@ -3561,23 +3967,10 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
adjust_address (operands[1], SImode, 4));
return;
}
-
- if (!no_new_pseudos)
- {
- if (GET_CODE (operands[1]) == MEM && optimize > 0
- && (mode == QImode || mode == HImode || mode == SImode)
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
- {
- rtx reg = gen_reg_rtx (word_mode);
- emit_insn (gen_rtx_SET (word_mode, reg,
- gen_rtx_ZERO_EXTEND (word_mode,
- operands[1])));
- operands[1] = gen_lowpart (mode, reg);
- }
- if (GET_CODE (operands[0]) != REG)
- operands[1] = force_reg (mode, operands[1]);
- }
+ if (!no_new_pseudos && GET_CODE (operands[0]) == MEM
+ && !gpc_reg_operand (operands[1], mode))
+ operands[1] = force_reg (mode, operands[1]);
if (mode == SFmode && ! TARGET_POWERPC
&& TARGET_HARD_FLOAT && TARGET_FPRS
@@ -3591,7 +3984,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
regnum = REGNO (operands[1]);
else
regnum = -1;
-
+
/* If operands[1] is a register, on POWER it may have
double-precision data in it, so truncate it to single
precision. */
@@ -3606,11 +3999,29 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
/* Recognize the case where operand[1] is a reference to thread-local
data and load its address to a register. */
- if (GET_CODE (operands[1]) == SYMBOL_REF)
+ if (rs6000_tls_referenced_p (operands[1]))
{
- enum tls_model model = SYMBOL_REF_TLS_MODEL (operands[1]);
- if (model != 0)
- operands[1] = rs6000_legitimize_tls_address (operands[1], model);
+ enum tls_model model;
+ rtx tmp = operands[1];
+ rtx addend = NULL;
+
+ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (tmp, 0), 1);
+ tmp = XEXP (XEXP (tmp, 0), 0);
+ }
+
+ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ model = SYMBOL_REF_TLS_MODEL (tmp);
+ gcc_assert (model != 0);
+
+ tmp = rs6000_legitimize_tls_address (tmp, model);
+ if (addend)
+ {
+ tmp = gen_rtx_PLUS (mode, tmp, addend);
+ tmp = force_operand (tmp, operands[0]);
+ }
+ operands[1] = tmp;
}
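/* A sketch of the addend path just above: a source such as
   (const (plus (symbol_ref "tls_var") (const_int 8))) is split so
   that only the SYMBOL_REF goes through
   rs6000_legitimize_tls_address; the +8 is reapplied afterwards with
   force_operand.  */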
/* Handle the case where reload calls us with an invalid address. */
@@ -3619,14 +4030,9 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
|| ! nonimmediate_operand (operands[0], mode)))
goto emit_set;
- /* Handle the case of CONSTANT_P_RTX. */
- if (GET_CODE (operands[1]) == CONSTANT_P_RTX)
- goto emit_set;
-
/* 128-bit constant floating-point values on Darwin should really be
loaded as two parts. */
- if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
- && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128
+ if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
&& mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
{
/* DImode is used, not DFmode, because simplify_gen_subreg doesn't
@@ -3655,13 +4061,16 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
break;
case TFmode:
+ rs6000_eliminate_indexed_memrefs (operands);
+ /* fall through */
+
case DFmode:
case SFmode:
- if (CONSTANT_P (operands[1])
+ if (CONSTANT_P (operands[1])
&& ! easy_fp_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case V16QImode:
case V8HImode:
case V4SFmode:
@@ -3674,14 +4083,14 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
&& !easy_vector_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case SImode:
case DImode:
/* Use default pattern for address of ELF small data */
if (TARGET_ELF
&& mode == Pmode
&& DEFAULT_ABI == ABI_V4
- && (GET_CODE (operands[1]) == SYMBOL_REF
+ && (GET_CODE (operands[1]) == SYMBOL_REF
|| GET_CODE (operands[1]) == CONST)
&& small_data_operand (operands[1], mode))
{
@@ -3722,7 +4131,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
= CONSTANT_POOL_ADDRESS_P (operands[1]);
SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
- SYMBOL_REF_DECL (new_ref) = SYMBOL_REF_DECL (operands[1]);
+ SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
operands[1] = new_ref;
}
@@ -3736,7 +4145,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
operands[1], mode, operands[0]);
if (operands[0] != operands[1])
emit_insn (gen_rtx_SET (VOIDmode,
- operands[0], operands[1]));
+ operands[0], operands[1]));
return;
}
#endif
@@ -3821,35 +4230,23 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
operands[1] = force_const_mem (mode, operands[1]);
- if (TARGET_TOC
+ if (TARGET_TOC
&& constant_pool_expr_p (XEXP (operands[1], 0))
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
get_pool_constant (XEXP (operands[1], 0)),
get_pool_mode (XEXP (operands[1], 0))))
{
operands[1]
- = gen_rtx_MEM (mode,
- create_TOC_reference (XEXP (operands[1], 0)));
+ = gen_const_mem (mode,
+ create_TOC_reference (XEXP (operands[1], 0)));
set_mem_alias_set (operands[1], get_TOC_alias_set ());
- RTX_UNCHANGING_P (operands[1]) = 1;
}
}
break;
case TImode:
- if (GET_CODE (operands[0]) == MEM
- && GET_CODE (XEXP (operands[0], 0)) != REG
- && ! reload_in_progress)
- operands[0]
- = replace_equiv_address (operands[0],
- copy_addr_to_reg (XEXP (operands[0], 0)));
-
- if (GET_CODE (operands[1]) == MEM
- && GET_CODE (XEXP (operands[1], 0)) != REG
- && ! reload_in_progress)
- operands[1]
- = replace_equiv_address (operands[1],
- copy_addr_to_reg (XEXP (operands[1], 0)));
+ rs6000_eliminate_indexed_memrefs (operands);
+
if (TARGET_POWER)
{
emit_insn (gen_rtx_PARALLEL (VOIDmode,
@@ -3863,7 +4260,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Above, we may have called force_const_mem which may have returned
@@ -3878,7 +4275,8 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
/* Nonzero if we can use a floating-point register to pass this arg. */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
- (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ (SCALAR_FLOAT_MODE_P (MODE) \
+ && !DECIMAL_FLOAT_MODE_P (MODE) \
&& (CUM)->fregno <= FP_ARG_MAX_REG \
&& TARGET_HARD_FLOAT && TARGET_FPRS)
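/* So a DFmode or SFmode scalar is a candidate for the next FPR while
   (CUM)->fregno is still within FP_ARG_MAX_REG and hard-float FPRs
   are enabled; the DECIMAL_FLOAT_MODE_P test keeps decimal float
   arguments out of this path even though they are scalar float
   modes.  */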
@@ -3898,7 +4296,7 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
returned in memory. The Darwin ABI does the same. The SVR4 ABI
specifies that structures <= 8 bytes are returned in r3/r4, but a
draft put them in memory, and GCC used to implement the draft
- instead of the final standard. Therefore, TARGET_AIX_STRUCT_RET
+ instead of the final standard. Therefore, aix_struct_return
controls this instead of DEFAULT_ABI; V.4 targets needing backward
compatibility can change DRAFT_V4_STRUCT_RET to override the
default, and -m switches get the final word. See
@@ -3913,12 +4311,54 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
static bool
rs6000_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
+ /* In the darwin64 ABI, try to use registers for larger structs
+ if possible. */
+ if (rs6000_darwin64_abi
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed
+ as an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
+ if (valret)
+ return false;
+ /* Otherwise fall through to more conventional ABI rules. */
+ }
+
if (AGGREGATE_TYPE_P (type)
- && (TARGET_AIX_STRUCT_RET
+ && (aix_struct_return
|| (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
return true;
- if (DEFAULT_ABI == ABI_V4 && TYPE_MODE (type) == TFmode)
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
+ return false;
+
+ /* Return synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_return_big_vectors = false;
+ if (!warned_for_return_big_vectors)
+ {
+ warning (0, "GCC vector returned by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_return_big_vectors = true;
+ }
+ return true;
+ }
+
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
return true;
+
return false;
}
@@ -3930,7 +4370,7 @@ rs6000_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
so we never return a PARALLEL. */
void
-init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
rtx libname ATTRIBUTE_UNUSED, int incoming,
int libcall, int n_named_args)
{
@@ -3976,18 +4416,29 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
fprintf (stderr, " proto = %d, nargs = %d\n",
cum->prototype, cum->nargs_prototype);
}
-
- if (fntype
- && !TARGET_ALTIVEC
- && TARGET_ALTIVEC_ABI
- && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
- {
- error ("Cannot return value in vector register because"
- " altivec instructions are disabled, use -maltivec"
- " to enable them.");
- }
+
+ if (fntype
+ && !TARGET_ALTIVEC
+ && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
+ {
+ error ("cannot return value in vector register because"
+ " altivec instructions are disabled, use -maltivec"
+ " to enable them");
+ }
}
+/* Return true if TYPE must be passed on the stack and not in registers. */
+
+static bool
+rs6000_must_pass_in_stack (enum machine_mode mode, tree type)
+{
+ if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
+ return must_pass_in_stack_var_size (mode, type);
+ else
+ return must_pass_in_stack_var_size_or_pad (mode, type);
+}
+
/* If defined, a C expression which determines whether, and in which
direction, to pad out an argument with extra space. The value
should be of type `enum direction': either `upward' to pad above
@@ -4011,7 +4462,7 @@ function_arg_padding (enum machine_mode mode, tree type)
{
/* GCC used to pass structures of the same size as integer types as
if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
- ie. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
+ i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
passed padded downward, except that -mstrict-align further
muddied the water in that multi-component structures of 2 and 4
bytes in size were passed padded upward.
@@ -4047,24 +4498,58 @@ function_arg_padding (enum machine_mode mode, tree type)
}
/* If defined, a C expression that gives the alignment boundary, in bits,
- of an argument with the specified mode and type. If it is not defined,
+ of an argument with the specified mode and type. If it is not defined,
PARM_BOUNDARY is used for all arguments.
-
- V.4 wants long longs to be double word aligned. */
+
+ V.4 wants long longs and doubles to be double word aligned. Just
+ testing the mode size is a boneheaded way to do this as it means
+ that other types such as complex int are also double word aligned.
+ However, we're stuck with this because changing the ABI might break
+ existing library interfaces.
+
+ Doubleword align SPE vectors.
+ Quadword align Altivec vectors.
+ Quadword align large synthetic vector types. */
int
-function_arg_boundary (enum machine_mode mode, tree type ATTRIBUTE_UNUSED)
+function_arg_boundary (enum machine_mode mode, tree type)
{
- if (DEFAULT_ABI == ABI_V4 && GET_MODE_SIZE (mode) == 8)
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_MODE_SIZE (mode) == 8
+ || (TARGET_HARD_FLOAT
+ && TARGET_FPRS
+ && mode == TFmode)))
return 64;
- else if (SPE_VECTOR_MODE (mode))
+ else if (SPE_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 8
+ && int_size_in_bytes (type) < 16))
return 64;
- else if (ALTIVEC_VECTOR_MODE (mode))
+ else if (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 16))
+ return 128;
+ else if (rs6000_darwin64_abi && mode == BLKmode
+ && type && TYPE_ALIGN (type) > 64)
return 128;
else
return PARM_BOUNDARY;
}
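
The alignment rules in the comment above reduce to a short decision chain. The sketch below is a standalone C model of it, not GCC code: the machine-mode and type tests are collapsed into plain size/flag parameters, the TFmode and darwin64 BLKmode cases are omitted, and the name arg_boundary is illustrative only.

    #include <stdio.h>

    #define PARM_BOUNDARY 32   /* bits per parameter slot, 32-bit V.4 */

    /* Reduced model of function_arg_boundary; plain flags stand in
       for the machine-mode tests in the real code.  */
    static int arg_boundary (int size_bytes, int is_vector, int abi_v4)
    {
      if (abi_v4 && size_bytes == 8)
        return 64;                  /* long long / double on V.4 */
      if (is_vector && size_bytes >= 16)
        return 128;                 /* AltiVec and large synthetic vectors */
      if (is_vector && size_bytes >= 8)
        return 64;                  /* SPE and small synthetic vectors */
      return PARM_BOUNDARY;
    }

    int main (void)
    {
      printf ("double on V.4:  %d bits\n", arg_boundary (8, 0, 1));
      printf ("16-byte vector: %d bits\n", arg_boundary (16, 1, 0));
      printf ("plain int:      %d bits\n", arg_boundary (4, 0, 0));
      return 0;
    }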
+/* For a function parm of MODE and TYPE, return the starting word in
+ the parameter area. NWORDS of the parameter area are already used. */
+
+static unsigned int
+rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
+{
+ unsigned int align;
+ unsigned int parm_offset;
+
+ align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
+ parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
+ return nwords + (-(parm_offset + nwords) & align);
+}
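
A minimal, self-contained demonstration of the rs6000_parm_start arithmetic above, assuming the save-area word offsets named in the code (2 for V.4, 6 for the AIX-style layout) and ALIGN already reduced to a mask:

    #include <stdio.h>

    /* Mirror of rs6000_parm_start: NWORDS parameter words are used,
       ALIGN is (boundary / PARM_BOUNDARY) - 1, i.e. a mask, and
       PARM_OFFSET is where the save area starts in words.  */
    static unsigned int parm_start (unsigned int nwords, unsigned int align,
                                    unsigned int parm_offset)
    {
      return nwords + (-(parm_offset + nwords) & align);
    }

    int main (void)
    {
      /* 32-bit AIX layout: save area at word 6 (byte 24).  A 16-byte
         aligned vector lands at word 2, i.e. byte 24 + 8 = 32.  */
      printf ("%u\n", parm_start (0, 3, 6));   /* prints 2 */
      /* V.4 double (8-byte boundary, save area at word 2).  */
      printf ("%u\n", parm_start (1, 1, 2));   /* prints 2 */
      return 0;
    }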
+
/* Compute the size (in words) of a function argument. */
static unsigned long
@@ -4083,6 +4568,89 @@ rs6000_arg_size (enum machine_mode mode, tree type)
return (size + 7) >> 3;
}
+/* Use this to flush pending int fields. */
+
+static void
+rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos)
+{
+ unsigned int startbit, endbit;
+ int intregs, intoffset;
+ enum machine_mode mode;
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Move intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ }
+ }
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ cum->words += intregs;
+}
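
The startbit/endbit arithmetic is easiest to check with concrete numbers. This sketch assumes 64-bit words as on darwin64 and ignores the mid-word mode search; it only reproduces the register-count computation:

    #include <stdio.h>

    #define BITS_PER_WORD 64   /* darwin64 */

    /* How many whole registers does the integer run
       [intoffset, bitpos) occupy, rounding to word boundaries the
       same way rs6000_darwin64_record_arg_advance_flush does?  */
    static int int_regs_used (int intoffset, int bitpos)
    {
      unsigned int startbit = intoffset & -BITS_PER_WORD;
      unsigned int endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
      return (endbit - startbit) / BITS_PER_WORD;
    }

    int main (void)
    {
      /* Fields from bit 0 up to bit 96 span two 64-bit words.  */
      printf ("%d\n", int_regs_used (0, 96));    /* prints 2 */
      /* A run entirely inside the second word costs one register.  */
      printf ("%d\n", int_regs_used (64, 96));   /* prints 1 */
      return 0;
    }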
+
+/* The darwin64 ABI calls for us to recurse down through structs,
+ looking for elements passed in registers. Unfortunately, we have
+ to track int register count here also because of misalignments
+ in powerpc alignment mode. */
+
+static void
+rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
+ tree type,
+ HOST_WIDE_INT startbitpos)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
+ else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
+ }
+ else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->vregno++;
+ cum->words += 2;
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
+
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be available.)
@@ -4092,25 +4660,32 @@ rs6000_arg_size (enum machine_mode mode, tree type)
itself. */
void
-function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int named)
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named, int depth)
{
- cum->nargs_prototype--;
+ int size;
+
+ /* Only tick off an argument if we're not recursing. */
+ if (depth == 0)
+ cum->nargs_prototype--;
- if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
{
bool stack = false;
if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
- {
+ {
cum->vregno++;
if (!TARGET_ALTIVEC)
- error ("Cannot pass argument in vector register because"
+ error ("cannot pass argument in vector register because"
" altivec instructions are disabled, use -maltivec"
- " to enable them.");
+ " to enable them");
/* PowerPC64 Linux and AIX allocate GPRs for a vector argument
- even if it is going to be passed in a vector register.
+ even if it is going to be passed in a vector register.
Darwin does the same for variable-argument functions. */
if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
|| (cum->stdarg && DEFAULT_ABI != ABI_V4))
@@ -4120,9 +4695,9 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
stack = true;
if (stack)
- {
+ {
int align;
-
+
/* Vector parameters must be 16-byte aligned. This places
them at 2 mod 4 in terms of words in 32-bit mode, since
the parameter save area starts at offset 24 from the
@@ -4135,13 +4710,13 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
else
align = cum->words & 1;
cum->words += align + rs6000_arg_size (mode, type);
-
+
if (TARGET_DEBUG_ARG)
{
- fprintf (stderr, "function_adv: words = %2d, align=%d, ",
+ fprintf (stderr, "function_adv: words = %2d, align=%d, ",
cum->words, align);
fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
- cum->nargs_prototype, cum->prototype,
+ cum->nargs_prototype, cum->prototype,
GET_MODE_NAME (mode));
}
}
@@ -4150,17 +4725,46 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
&& !cum->stdarg
&& cum->sysv_gregno <= GP_ARG_MAX_REG)
cum->sysv_gregno++;
+
+ else if (rs6000_darwin64_abi
+ && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && (size = int_size_in_bytes (type)) > 0)
+ {
+ /* Variable sized types have size == -1 and are
+ treated as if consisting entirely of ints.
+ Pad to 16 byte boundary if needed. */
+ if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+ /* For varargs, we can just go up by the size of the struct. */
+ if (!named)
+ cum->words += (size + 7) / 8;
+ else
+ {
+ /* It is tempting to say int register count just goes up by
+ sizeof(type)/8, but this is wrong in a case such as
+ { int; double; int; } [powerpc alignment]. We have to
+ grovel through the fields for these too. */
+ cum->intoffset = 0;
+ rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
+ rs6000_darwin64_record_arg_advance_flush (cum,
+ size * BITS_PER_UNIT);
+ }
+ }
else if (DEFAULT_ABI == ABI_V4)
{
if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (mode == SFmode || mode == DFmode))
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
{
- if (cum->fregno <= FP_ARG_V4_MAX_REG)
- cum->fregno++;
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
else
{
- if (mode == DFmode)
- cum->words += cum->words & 1;
+ cum->fregno = FP_ARG_V4_MAX_REG + 1;
+ if (mode == DFmode || mode == TFmode)
+ cum->words += cum->words & 1;
cum->words += rs6000_arg_size (mode, type);
}
}
@@ -4205,17 +4809,13 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
else
{
int n_words = rs6000_arg_size (mode, type);
- int align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
+ int start_words = cum->words;
+ int align_words = rs6000_parm_start (mode, type, start_words);
- /* The simple alignment calculation here works because
- function_arg_boundary / PARM_BOUNDARY will only be 1 or 2.
- If we ever want to handle alignments larger than 8 bytes for
- 32-bit or 16 bytes for 64-bit, then we'll need to take into
- account the offset to the start of the parm save area. */
- align &= cum->words;
- cum->words += align + n_words;
+ cum->words = align_words + n_words;
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
&& TARGET_HARD_FLOAT && TARGET_FPRS)
cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
@@ -4225,20 +4825,61 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
cum->words, cum->fregno);
fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
- fprintf (stderr, "named = %d, align = %d\n", named, align);
+ fprintf (stderr, "named = %d, align = %d, depth = %d\n",
+ named, align_words - start_words, depth);
}
}
}
-/* Determine where to put a SIMD argument on the SPE. */
+static rtx
+spe_build_register_parallel (enum machine_mode mode, int gregno)
+{
+ rtx r1, r3;
+
+ switch (mode)
+ {
+ case DFmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
+
+ case DCmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ r3 = gen_rtx_REG (DImode, gregno + 2);
+ r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
+
+ default:
+ gcc_unreachable ();
+ }
+}
+/* Determine where to put a SIMD argument on the SPE. */
static rtx
-rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type)
{
+ int gregno = cum->sysv_gregno;
+
+ /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
+ doubles are passed and returned in a pair of GPRs for ABI compatibility. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == DCmode))
+ {
+ int n_words = rs6000_arg_size (mode, type);
+
+ /* Doubles go in an odd/even register pair (r5/r6, etc). */
+ if (mode == DFmode)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ return NULL_RTX;
+
+ return spe_build_register_parallel (mode, gregno);
+ }
if (cum->stdarg)
{
- int gregno = cum->sysv_gregno;
int n_words = rs6000_arg_size (mode, type);
/* SPE vectors are put in odd registers. */
@@ -4261,13 +4902,198 @@ rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
else
{
- if (cum->sysv_gregno <= GP_ARG_MAX_REG)
- return gen_rtx_REG (mode, cum->sysv_gregno);
+ if (gregno <= GP_ARG_MAX_REG)
+ return gen_rtx_REG (mode, gregno);
else
return NULL_RTX;
}
}
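
The line gregno += (1 - gregno) & 1 above is a branch-free round-up to the next odd register number, which is where a DFmode pair must start (r5/r6, r7/r8, and so on). A quick standalone check:

    #include <stdio.h>

    int main (void)
    {
      /* (1 - gregno) & 1 is 1 exactly when gregno is even, bumping
         it to the next odd register; odd values pass through.  */
      for (int gregno = 3; gregno <= 8; gregno++)
        printf ("r%d -> r%d\n", gregno, gregno + ((1 - gregno) & 1));
      return 0;
    }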
+/* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
+ structure between cum->intoffset and bitpos to integer registers. */
+
+static void
+rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos, rtx rvec[], int *k)
+{
+ enum machine_mode mode;
+ unsigned int regno;
+ unsigned int startbit, endbit;
+ int this_regno, intregs, intoffset;
+ rtx reg;
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ /* If this is the trailing part of a word, try to only load that
+ much into the register. Otherwise load the whole register. Note
+ that in the latter case we may pick up unwanted bits. It's not a
+ problem at the moment, but we may wish to revisit this. */
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Move intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ mode = word_mode;
+ }
+ }
+ else
+ mode = word_mode;
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ this_regno = cum->words + intoffset / BITS_PER_WORD;
+
+ if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
+ cum->use_stack = 1;
+
+ intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
+ if (intregs <= 0)
+ return;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = GP_ARG_MIN_REG + this_regno;
+ reg = gen_rtx_REG (mode, regno);
+ rvec[(*k)++] =
+ gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_regno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ mode = word_mode;
+ intregs -= 1;
+ }
+ while (intregs > 0);
+}
+
+/* Recursive workhorse for the following. */
+
+static void
+rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, tree type,
+ HOST_WIDE_INT startbitpos, rtx rvec[],
+ int *k)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
+ else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+#if 0
+ switch (mode)
+ {
+ case SCmode: mode = SFmode; break;
+ case DCmode: mode = DFmode; break;
+ case TCmode: mode = TFmode; break;
+ default: break;
+ }
+#endif
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->fregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ if (mode == TFmode)
+ cum->fregno++;
+ }
+ else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
+ {
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
+
+/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
+ the register(s) to be used for each field and subfield of a struct
+ being passed by value, along with the offset of where the
+ register's value may be found in the block. FP fields go in FP
+ registers, vector fields go in vector registers, and everything
+ else goes in int registers, packed as in memory.
+
+ This code is also used for function return values. RETVAL indicates
+ whether this is the case.
+
+ Much of this is taken from the SPARC V9 port, which has a similar
+ calling convention. */
+
+static rtx
+rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, tree type,
+ int named, bool retval)
+{
+ rtx rvec[FIRST_PSEUDO_REGISTER];
+ int k = 1, kbase = 1;
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ /* This is a copy; modifications are not visible to our caller. */
+ CUMULATIVE_ARGS copy_cum = *orig_cum;
+ CUMULATIVE_ARGS *cum = &copy_cum;
+
+ /* Pad to 16 byte boundary if needed. */
+ if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+
+ cum->intoffset = 0;
+ cum->use_stack = 0;
+ cum->named = named;
+
+ /* Put entries into rvec[] for individual FP and vector fields, and
+ for the chunks of memory that go in int regs. Note we start at
+ element 1; 0 is reserved for an indication of using memory, and
+ may or may not be filled in below. */
+ rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
+ rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
+
+ /* If any part of the struct went on the stack put all of it there.
+ This hack is because the generic code for
+ FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
+ parts of the struct are not at the beginning. */
+ if (cum->use_stack)
+ {
+ if (retval)
+ return NULL_RTX; /* doesn't go in registers at all */
+ kbase = 0;
+ rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+ if (k > 1 || cum->use_stack)
+ return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
+ else
+ return NULL_RTX;
+}
+
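
The rvec/k/kbase indexing used by rs6000_darwin64_record_arg is easy to misread: slot 0 is reserved for the "also lives in memory" marker and is included in the emitted vector only when use_stack is set. A reduced sketch of the same pattern, with strings standing in for rtxes:

    #include <stdio.h>

    int main (void)
    {
      const char *rvec[8];
      int k = 1, kbase = 1;          /* slot 0 reserved, may stay unused */
      int use_stack = 1;             /* pretend part of the struct spilled */

      rvec[k++] = "(reg:DF f1) @ 0";  /* field entries start at index 1 */
      rvec[k++] = "(reg:DI r3) @ 8";

      if (use_stack)
        {
          kbase = 0;                 /* include the reserved slot... */
          rvec[0] = "(mem) marker";  /* ...as the memory indicator */
        }

      /* Emit k - kbase elements starting at rvec[kbase], exactly as
         gen_rtx_PARALLEL (..., gen_rtvec_v (k - kbase, &rvec[kbase])).  */
      for (int i = kbase; i < k; i++)
        printf ("%s\n", rvec[i]);
      return 0;
    }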
/* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
static rtx
@@ -4293,15 +5119,13 @@ rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
if (align_words + n_units > GP_ARG_NUM_REG)
/* Not all of the arg fits in gprs. Say that it goes in memory too,
using a magic NULL_RTX component.
- FIXME: This is not strictly correct. Only some of the arg
- belongs in memory, not all of it. However, there isn't any way
- to do this currently, apart from building rtx descriptions for
- the pieces of memory we want stored. Due to bugs in the generic
- code we can't use the normal function_arg_partial_nregs scheme
- with the PARALLEL arg description we emit here.
- In any case, the code to store the whole arg to memory is often
- more efficient than code to store pieces, and we know that space
- is available in the right place for the whole arg. */
+ This is not strictly correct. Only some of the arg belongs in
+ memory, not all of it. However, the normal scheme using
+ function_arg_partial_nregs can result in unusual subregs, eg.
+ (subreg:SI (reg:DF) 4), which are not handled well. The code to
+ store the whole arg to memory is often more efficient than code
+ to store pieces, and we know that space is available in the right
+ place for the whole arg. */
rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
i = 0;
@@ -4325,7 +5149,8 @@ rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
This is null for libcalls where that information may
not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
- the preceding args and about the function being called.
+ the preceding args and about the function being called. It is
+ not modified in this routine.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis).
@@ -4343,8 +5168,8 @@ rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
with MODE and TYPE set to that of the pointer to the arg, not the arg
itself. */
-struct rtx_def *
-function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rtx
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
enum rs6000_abi abi = DEFAULT_ABI;
@@ -4374,34 +5199,46 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
return GEN_INT (cum->call_cookie);
}
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE)
+ {
+ rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
+ if (rslt != NULL_RTX)
+ return rslt;
+ /* Else fall through to usual handling. */
+ }
+
if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
if (TARGET_64BIT && ! cum->prototype)
{
- /* Vector parameters get passed in vector register
- and also in GPRs or memory, in absence of prototype. */
- int align_words;
- rtx slot;
- align_words = (cum->words + 1) & ~1;
-
- if (align_words >= GP_ARG_NUM_REG)
- {
- slot = NULL_RTX;
- }
- else
- {
- slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
- }
- return gen_rtx_PARALLEL (mode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- slot, const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode, cum->vregno),
- const0_rtx)));
+ /* Vector parameters get passed in vector register
+ and also in GPRs or memory, in absence of prototype. */
+ int align_words;
+ rtx slot;
+ align_words = (cum->words + 1) & ~1;
+
+ if (align_words >= GP_ARG_NUM_REG)
+ {
+ slot = NULL_RTX;
+ }
+ else
+ {
+ slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ }
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ slot, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno),
+ const0_rtx)));
}
else
return gen_rtx_REG (mode, cum->vregno);
- else if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ else if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
{
if (named || abi == ABI_V4)
return NULL_RTX;
@@ -4442,14 +5279,19 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
}
}
- else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ else if (TARGET_SPE_ABI && TARGET_SPE
+ && (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DCmode))))
return rs6000_spe_function_arg (cum, mode, type);
+
else if (abi == ABI_V4)
{
if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (mode == SFmode || mode == DFmode))
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
{
- if (cum->fregno <= FP_ARG_V4_MAX_REG)
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
return gen_rtx_REG (mode, cum->fregno);
else
return NULL_RTX;
@@ -4477,8 +5319,7 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
else
{
- int align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
- int align_words = cum->words + (cum->words & align);
+ int align_words = rs6000_parm_start (mode, type, cum->words);
if (USE_FP_FOR_ARG_P (cum, mode, type))
{
@@ -4493,8 +5334,7 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
{
/* Currently, we only ever need one reg here because complex
doubles are split. */
- if (cum->fregno != FP_ARG_MAX_REG || fmode != TFmode)
- abort ();
+ gcc_assert (cum->fregno == FP_ARG_MAX_REG && fmode == TFmode);
/* Long double split over regs and memory. */
fmode = DFmode;
@@ -4527,11 +5367,21 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
include the portion actually in registers here. */
enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
rtx off;
+ int i = 0;
+ if (align_words + n_words > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it
+ goes in memory too, using a magic NULL_RTX
+ component. Also see comment in
+ rs6000_mixed_function_arg for why the normal
+ function_arg_partial_nregs scheme doesn't work
+ in this case. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
+ const0_rtx);
do
{
r = gen_rtx_REG (rmode,
GP_ARG_MIN_REG + align_words);
- off = GEN_INT (k * GET_MODE_SIZE (rmode));
+ off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
}
while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
@@ -4559,6 +5409,9 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
if (TARGET_32BIT && TARGET_POWERPC64)
return rs6000_mixed_function_arg (mode, type, align_words);
+ if (mode == BLKmode)
+ mode = Pmode;
+
return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
}
else
@@ -4567,18 +5420,16 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
/* For an arg passed partly in registers and partly in memory, this is
- the number of registers used. For args passed entirely in registers
- or entirely in memory, zero. When an arg is described by a PARALLEL,
- perhaps using more than one register type, this function returns the
- number of registers used by the first element of the PARALLEL. */
+ the number of bytes passed in registers. For args passed entirely in
+ registers or entirely in memory, zero. When an arg is described by a
+ PARALLEL, perhaps using more than one register type, this function
+ returns the number of bytes used by the first element of the PARALLEL. */
-int
-function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int named)
+static int
+rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named)
{
int ret = 0;
- int align;
- int parm_offset;
int align_words;
if (DEFAULT_ABI == ABI_V4)
@@ -4588,32 +5439,39 @@ function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
&& cum->nargs_prototype >= 0)
return 0;
- align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
- parm_offset = TARGET_32BIT ? 2 : 0;
- align_words = cum->words + ((parm_offset - cum->words) & align);
-
- if (USE_FP_FOR_ARG_P (cum, mode, type)
- /* If we are passing this arg in gprs as well, then this function
- should return the number of gprs (or memory) partially passed,
- *not* the number of fprs. */
- && !(type
- && (cum->nargs_prototype <= 0
- || (DEFAULT_ABI == ABI_AIX
- && TARGET_XL_COMPAT
- && align_words >= GP_ARG_NUM_REG))))
- {
- if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3) > FP_ARG_MAX_REG + 1)
- ret = FP_ARG_MAX_REG + 1 - cum->fregno;
+ /* In this complicated case we just disable the partial_nregs code. */
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ return 0;
+
+ align_words = rs6000_parm_start (mode, type, cum->words);
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type))
+ {
+ /* If we are passing this arg in the fixed parameter save area
+ (gprs or memory) as well as fprs, then this function should
+ return the number of partial bytes passed in the parameter
+ save area rather than partial bytes passed in fprs. */
+ if (type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG)))
+ return 0;
+ else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
+ > FP_ARG_MAX_REG + 1)
+ ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
else if (cum->nargs_prototype >= 0)
return 0;
}
if (align_words < GP_ARG_NUM_REG
&& GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
- ret = GP_ARG_NUM_REG - align_words;
+ ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
if (ret != 0 && TARGET_DEBUG_ARG)
- fprintf (stderr, "function_arg_partial_nregs: %d\n", ret);
+ fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
return ret;
}
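
For the GPR case, rs6000_arg_partial_bytes reduces to the arithmetic below (GP_ARG_NUM_REG is 8, covering r3-r10); the FPR and darwin64 early-outs are omitted from this sketch:

    #include <stdio.h>

    #define GP_ARG_NUM_REG 8   /* r3..r10 */

    static int partial_bytes (int align_words, int n_words, int bits32)
    {
      if (align_words < GP_ARG_NUM_REG
          && GP_ARG_NUM_REG < align_words + n_words)
        return (GP_ARG_NUM_REG - align_words) * (bits32 ? 4 : 8);
      return 0;
    }

    int main (void)
    {
      /* 32-bit: a 4-word (16-byte) struct starting at word 6 gets two
         words in registers and two in memory -> 8 bytes partial.  */
      printf ("%d\n", partial_bytes (6, 4, 1));   /* prints 8 */
      /* Fits entirely in registers: nothing partial.  */
      printf ("%d\n", partial_bytes (2, 4, 1));   /* prints 0 */
      return 0;
    }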
@@ -4632,22 +5490,60 @@ function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
As an extension to all ABIs, variable sized types are passed by
reference. */
-int
-function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- tree type, int named ATTRIBUTE_UNUSED)
+static bool
+rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, tree type,
+ bool named ATTRIBUTE_UNUSED)
{
- if ((DEFAULT_ABI == ABI_V4
- && ((type && AGGREGATE_TYPE_P (type))
- || mode == TFmode))
- || (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
- || (type && int_size_in_bytes (type) < 0))
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
{
if (TARGET_DEBUG_ARG)
- fprintf (stderr, "function_arg_pass_by_reference\n");
+ fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
+ return 1;
+ }
+ if (!type)
+ return 0;
+
+ if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
+ return 1;
+ }
+
+ if (int_size_in_bytes (type) < 0)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
+ return 1;
+ }
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
+ return 1;
+ }
+
+ /* Pass synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_pass_big_vectors = false;
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
+ if (!warned_for_pass_big_vectors)
+ {
+ warning (0, "GCC vector passed by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_pass_big_vectors = true;
+ }
return 1;
}
+
return 0;
}
@@ -4660,30 +5556,28 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs)
if (nregs == 0)
return;
- for (i = 0; i < nregs; i++)
+ for (i = 0; i < nregs; i++)
{
- rtx tem = adjust_address_nv (x, reg_mode, i*GET_MODE_SIZE(reg_mode));
+ rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
if (reload_completed)
- {
- if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
- tem = NULL_RTX;
- else
- tem = simplify_gen_subreg (reg_mode, x, BLKmode,
- i * GET_MODE_SIZE(reg_mode));
- }
+ {
+ if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
+ tem = NULL_RTX;
+ else
+ tem = simplify_gen_subreg (reg_mode, x, BLKmode,
+ i * GET_MODE_SIZE (reg_mode));
+ }
else
tem = replace_equiv_address (tem, XEXP (tem, 0));
- if (tem == NULL_RTX)
- abort ();
+ gcc_assert (tem);
emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
}
}
-
/* Perform any actions needed for a function that is receiving a
- variable number of arguments.
+ variable number of arguments.
CUM is as above.
@@ -4697,8 +5591,9 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs)
stack and set PRETEND_SIZE to the length of the registers pushed. */
static void
-setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int *pretend_size ATTRIBUTE_UNUSED, int no_rtl)
+setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
{
CUMULATIVE_ARGS next_cum;
int reg_size = TARGET_32BIT ? 4 : 8;
@@ -4707,68 +5602,145 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
/* Skip the last named argument. */
next_cum = *cum;
- function_arg_advance (&next_cum, mode, type, 1);
+ function_arg_advance (&next_cum, mode, type, 1, 0);
if (DEFAULT_ABI == ABI_V4)
{
- /* Indicate to allocate space on the stack for varargs save area. */
- cfun->machine->sysv_varargs_p = 1;
+ first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
+
if (! no_rtl)
- save_area = plus_constant (virtual_stack_vars_rtx,
- - RS6000_VARARGS_SIZE);
+ {
+ int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
+ HOST_WIDE_INT offset = 0;
+
+ /* Try to optimize the size of the varargs save area.
+ The ABI requires that ap.reg_save_area is doubleword
+ aligned, but we don't need to allocate space for all
+ the bytes, only those into which we will actually save
+ anything. (A standalone sketch of this sizing follows this hunk.) */
+ if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
+ gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
+ {
+ if (gpr_reg_num)
+ fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD;
+ if (cfun->va_list_fpr_size
+ < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
+ else
+ fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ * UNITS_PER_FP_WORD;
+ }
+ if (gpr_reg_num)
+ {
+ offset = -((first_reg_offset * reg_size) & ~7);
+ if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
+ {
+ gpr_reg_num = cfun->va_list_gpr_size;
+ if (reg_size == 4 && (first_reg_offset & 1))
+ gpr_reg_num++;
+ }
+ gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
+ }
+ else if (fpr_size)
+ offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD
+ - (int) (GP_ARG_NUM_REG * reg_size);
- first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
+ if (gpr_size + fpr_size)
+ {
+ rtx reg_save_area
+ = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
+ gcc_assert (GET_CODE (reg_save_area) == MEM);
+ reg_save_area = XEXP (reg_save_area, 0);
+ if (GET_CODE (reg_save_area) == PLUS)
+ {
+ gcc_assert (XEXP (reg_save_area, 0)
+ == virtual_stack_vars_rtx);
+ gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
+ offset += INTVAL (XEXP (reg_save_area, 1));
+ }
+ else
+ gcc_assert (reg_save_area == virtual_stack_vars_rtx);
+ }
+
+ cfun->machine->varargs_save_offset = offset;
+ save_area = plus_constant (virtual_stack_vars_rtx, offset);
+ }
}
else
{
first_reg_offset = next_cum.words;
save_area = virtual_incoming_args_rtx;
- cfun->machine->sysv_varargs_p = 0;
- if (MUST_PASS_IN_STACK (mode, type))
+ if (targetm.calls.must_pass_in_stack (mode, type))
first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
}
set = get_varargs_alias_set ();
- if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG)
+ if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
+ && cfun->va_list_gpr_size)
{
+ int nregs = GP_ARG_NUM_REG - first_reg_offset;
+
+ if (va_list_gpr_counter_field)
+ {
+ /* V4 va_list_gpr_size counts number of registers needed. */
+ if (nregs > cfun->va_list_gpr_size)
+ nregs = cfun->va_list_gpr_size;
+ }
+ else
+ {
+ /* char * va_list instead counts number of bytes needed. */
+ if (nregs > cfun->va_list_gpr_size / reg_size)
+ nregs = cfun->va_list_gpr_size / reg_size;
+ }
+
mem = gen_rtx_MEM (BLKmode,
- plus_constant (save_area,
- first_reg_offset * reg_size)),
+ plus_constant (save_area,
+ first_reg_offset * reg_size));
+ MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
set_mem_align (mem, BITS_PER_WORD);
- rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
- GP_ARG_NUM_REG - first_reg_offset);
+ rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
+ nregs);
}
/* Save FP registers if needed. */
if (DEFAULT_ABI == ABI_V4
&& TARGET_HARD_FLOAT && TARGET_FPRS
&& ! no_rtl
- && next_cum.fregno <= FP_ARG_V4_MAX_REG)
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
{
- int fregno = next_cum.fregno;
+ int fregno = next_cum.fregno, nregs;
rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
rtx lab = gen_label_rtx ();
- int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) * 8);
+ int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD);
- emit_jump_insn (gen_rtx_SET (VOIDmode,
- pc_rtx,
- gen_rtx_IF_THEN_ELSE (VOIDmode,
+ emit_jump_insn
+ (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
gen_rtx_NE (VOIDmode, cr1,
- const0_rtx),
+ const0_rtx),
gen_rtx_LABEL_REF (VOIDmode, lab),
pc_rtx)));
- while (fregno <= FP_ARG_V4_MAX_REG)
+ for (nregs = 0;
+ fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
+ fregno++, off += UNITS_PER_FP_WORD, nregs++)
{
mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
- set_mem_alias_set (mem, set);
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
set_mem_align (mem, GET_MODE_ALIGNMENT (DFmode));
emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
- fregno++;
- off += 8;
}
emit_label (lab);
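
The save-area sizing in the V.4 branch above is dense; the following reduced model (32-bit reg_size of 4, one named GPR argument consumed, ignoring the va_list_gpr_size cap and the odd-word bump) shows how offset and gpr_size come out:

    #include <stdio.h>

    #define GP_ARG_NUM_REG 8
    #define REG_SIZE 4            /* 32-bit V.4 */

    int main (void)
    {
      /* One named GPR argument already consumed: r3.  */
      int first_reg_offset = 1;
      int gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;   /* r4..r10 */

      /* Offset of the save area below the frame, rounded down to
         8 bytes, then the block size rounded up to 8 bytes.  */
      int offset = -((first_reg_offset * REG_SIZE) & ~7);
      int gpr_size = (gpr_reg_num * REG_SIZE + 7) & ~7;

      printf ("offset = %d, gpr_size = %d\n", offset, gpr_size);
      /* offset = 0 (4 & ~7 == 0), gpr_size = 32 ((28 + 7) & ~7).  */
      return 0;
    }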
@@ -4790,9 +5762,9 @@ rs6000_build_builtin_va_list (void)
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
- f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
unsigned_char_type_node);
- f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
unsigned_char_type_node);
/* Give the two bytes of padding a name, so that -Wpadded won't warn on
every user file. */
@@ -4803,6 +5775,9 @@ rs6000_build_builtin_va_list (void)
f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
ptr_type_node);
+ va_list_gpr_counter_field = f_gpr;
+ va_list_fpr_counter_field = f_fpr;
+
DECL_FIELD_CONTEXT (f_gpr) = record;
DECL_FIELD_CONTEXT (f_fpr) = record;
DECL_FIELD_CONTEXT (f_res) = record;
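
Written as plain C, the record built here is the familiar V.4 va_list. The names gpr, fpr, and reg_save_area are visible in this hunk; reserved is the named two bytes of padding, and overflow_arg_area is the conventional name for the f_ovf field, assumed here since that identifier is outside this hunk:

    #include <stdio.h>

    /* C picture of the __va_list_tag record built above (tag renamed
       here to avoid colliding with a host compiler's builtin).  */
    struct ppc_va_list_tag
    {
      unsigned char gpr;          /* GPRs consumed so far */
      unsigned char fpr;          /* FPRs consumed so far */
      unsigned short reserved;    /* the named two bytes of padding */
      void *overflow_arg_area;    /* args that spilled to the stack */
      void *reg_save_area;        /* where the prologue saved the regs */
    };

    int main (void)
    {
      /* 1 + 1 + 2 + 4 + 4 = 12 bytes on a 32-bit target.  */
      printf ("%zu\n", sizeof (struct ppc_va_list_tag));
      return 0;
    }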
@@ -4845,85 +5820,88 @@ rs6000_va_start (tree valist, rtx nextarg)
f_ovf = TREE_CHAIN (f_res);
f_sav = TREE_CHAIN (f_ovf);
- valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
/* Count number of gp and fp argument registers used. */
words = current_function_args_info.words;
- n_gpr = current_function_args_info.sysv_gregno - GP_ARG_MIN_REG;
- n_fpr = current_function_args_info.fregno - FP_ARG_MIN_REG;
+ n_gpr = MIN (current_function_args_info.sysv_gregno - GP_ARG_MIN_REG,
+ GP_ARG_NUM_REG);
+ n_fpr = MIN (current_function_args_info.fregno - FP_ARG_MIN_REG,
+ FP_ARG_NUM_REG);
if (TARGET_DEBUG_ARG)
fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
words, n_gpr, n_fpr);
- t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (n_gpr, 0));
- TREE_SIDE_EFFECTS (t) = 1;
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ if (cfun->va_list_gpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
- t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (n_fpr, 0));
- TREE_SIDE_EFFECTS (t) = 1;
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ if (cfun->va_list_fpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
/* Find the overflow area. */
t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
if (words != 0)
- t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
- build_int_2 (words * UNITS_PER_WORD, 0));
- t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
+ build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ /* If there were no va_arg invocations, don't set up the register
+ save area. */
+ if (!cfun->va_list_gpr_size
+ && !cfun->va_list_fpr_size
+ && n_gpr < GP_ARG_NUM_REG
+ && n_fpr < FP_ARG_V4_MAX_REG)
+ return;
+
/* Find the register save area. */
t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
- t = build (PLUS_EXPR, TREE_TYPE (sav), t,
- build_int_2 (-RS6000_VARARGS_SIZE, -1));
- t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ if (cfun->machine->varargs_save_offset)
+ t = build2 (PLUS_EXPR, TREE_TYPE (sav), t,
+ build_int_cst (NULL_TREE, cfun->machine->varargs_save_offset));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg. */
-rtx
-rs6000_va_arg (tree valist, tree type)
+tree
+rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
{
tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
tree gpr, fpr, ovf, sav, reg, t, u;
- int indirect_p, size, rsize, n_reg, sav_ofs, sav_scale;
- rtx lab_false, lab_over, addr_rtx, r;
+ int size, rsize, n_reg, sav_ofs, sav_scale;
+ tree lab_false, lab_over, addr;
int align;
+ tree ptrtype = build_pointer_type (type);
- if (DEFAULT_ABI != ABI_V4)
+ if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
{
- /* Variable sized types are passed by reference, as are AltiVec
- vectors when 32-bit and not using the AltiVec ABI extension. */
- if (int_size_in_bytes (type) < 0
- || (TARGET_32BIT
- && !TARGET_ALTIVEC_ABI
- && ALTIVEC_VECTOR_MODE (TYPE_MODE (type))))
- {
- u = build_pointer_type (type);
-
- /* Args grow upward. */
- t = build (POSTINCREMENT_EXPR, TREE_TYPE (valist), valist,
- build_int_2 (POINTER_SIZE / BITS_PER_UNIT, 0));
- TREE_SIDE_EFFECTS (t) = 1;
-
- t = build1 (NOP_EXPR, build_pointer_type (u), t);
- TREE_SIDE_EFFECTS (t) = 1;
-
- t = build1 (INDIRECT_REF, u, t);
- TREE_SIDE_EFFECTS (t) = 1;
+ t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
+ return build_va_arg_indirect_ref (t);
+ }
- return expand_expr (t, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- }
- if (targetm.calls.split_complex_arg
- && TREE_CODE (type) == COMPLEX_TYPE)
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
{
tree elem_type = TREE_TYPE (type);
enum machine_mode elem_mode = TYPE_MODE (elem_type);
@@ -4931,33 +5909,24 @@ rs6000_va_arg (tree valist, tree type)
if (elem_size < UNITS_PER_WORD)
{
- rtx real_part, imag_part, dest_real, rr;
-
- real_part = rs6000_va_arg (valist, elem_type);
- imag_part = rs6000_va_arg (valist, elem_type);
+ tree real_part, imag_part;
+ tree post = NULL_TREE;
- /* We're not returning the value here, but the address.
- real_part and imag_part are not contiguous, and we know
- there is space available to pack real_part next to
- imag_part. float _Complex is not promoted to
- double _Complex by the default promotion rules that
- promote float to double. */
- if (2 * elem_size > UNITS_PER_WORD)
- abort ();
+ real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ &post);
+ /* Copy the value into a temporary, lest the formal temporary
+ be reused out from under us. */
+ real_part = get_initialized_tmp_var (real_part, pre_p, &post);
+ append_to_statement_list (post, pre_p);
- real_part = gen_rtx_MEM (elem_mode, real_part);
- imag_part = gen_rtx_MEM (elem_mode, imag_part);
+ imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ post_p);
- dest_real = adjust_address (imag_part, elem_mode, -elem_size);
- rr = gen_reg_rtx (elem_mode);
- emit_move_insn (rr, real_part);
- emit_move_insn (dest_real, rr);
-
- return XEXP (dest_real, 0);
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
}
}
- return std_expand_builtin_va_arg (valist, type);
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
}
f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
@@ -4966,46 +5935,32 @@ rs6000_va_arg (tree valist, tree type)
f_ovf = TREE_CHAIN (f_res);
f_sav = TREE_CHAIN (f_ovf);
- valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
size = int_size_in_bytes (type);
rsize = (size + 3) / 4;
align = 1;
- if (AGGREGATE_TYPE_P (type)
- || TYPE_MODE (type) == TFmode
- || (!TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type))))
- {
- /* Aggregates, long doubles, and AltiVec vectors are passed by
- reference. */
- indirect_p = 1;
- reg = gpr;
- n_reg = 1;
- sav_ofs = 0;
- sav_scale = 4;
- size = 4;
- rsize = 1;
- }
- else if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (TYPE_MODE (type) == SFmode || TYPE_MODE (type) == DFmode))
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && (TYPE_MODE (type) == SFmode
+ || TYPE_MODE (type) == DFmode
+ || TYPE_MODE (type) == TFmode))
{
/* FP args go in FP registers, if present. */
- indirect_p = 0;
reg = fpr;
- n_reg = 1;
+ n_reg = (size + 7) / 8;
sav_ofs = 8*4;
sav_scale = 8;
- if (TYPE_MODE (type) == DFmode)
+ if (TYPE_MODE (type) != SFmode)
align = 8;
}
else
{
/* Otherwise into GP registers. */
- indirect_p = 0;
reg = gpr;
n_reg = rsize;
sav_ofs = 0;
@@ -5016,66 +5971,59 @@ rs6000_va_arg (tree valist, tree type)
/* Pull the value out of the saved registers.... */
- lab_over = NULL_RTX;
- addr_rtx = gen_reg_rtx (Pmode);
+ lab_over = NULL;
+ addr = create_tmp_var (ptr_type_node, "addr");
+ DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
/* AltiVec vectors never go in registers when -mabi=altivec. */
if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
align = 16;
else
{
- lab_false = gen_label_rtx ();
- lab_over = gen_label_rtx ();
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
/* Long long and SPE vectors are aligned in the registers.
As is any other 2-gpr item such as complex int, due to a
historical mistake. */
u = reg;
- if (n_reg == 2)
+ if (n_reg == 2 && reg == gpr)
{
- u = build (BIT_AND_EXPR, TREE_TYPE (reg), reg,
- build_int_2 (n_reg - 1, 0));
- u = build (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, u);
- TREE_SIDE_EFFECTS (u) = 1;
+ u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), reg,
+ size_int (n_reg - 1));
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, u);
}
- emit_cmp_and_jump_insns
- (expand_expr (u, NULL_RTX, QImode, EXPAND_NORMAL),
- GEN_INT (8 - n_reg + 1), GE, const1_rtx, QImode, 1,
- lab_false);
+ t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
+ t = build2 (GE_EXPR, boolean_type_node, u, t);
+ u = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
+ gimplify_and_add (t, pre_p);
t = sav;
if (sav_ofs)
- t = build (PLUS_EXPR, ptr_type_node, sav, build_int_2 (sav_ofs, 0));
-
- u = build (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg,
- build_int_2 (n_reg, 0));
- TREE_SIDE_EFFECTS (u) = 1;
+ t = build2 (PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, size_int (n_reg));
u = build1 (CONVERT_EXPR, integer_type_node, u);
- TREE_SIDE_EFFECTS (u) = 1;
+ u = build2 (MULT_EXPR, integer_type_node, u, size_int (sav_scale));
+ t = build2 (PLUS_EXPR, ptr_type_node, t, u);
- u = build (MULT_EXPR, integer_type_node, u, build_int_2 (sav_scale, 0));
- TREE_SIDE_EFFECTS (u) = 1;
+ t = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (t, pre_p);
- t = build (PLUS_EXPR, ptr_type_node, t, u);
- TREE_SIDE_EFFECTS (t) = 1;
-
- r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ t = build1 (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (t, pre_p);
- emit_jump_insn (gen_jump (lab_over));
- emit_barrier ();
+ t = build1 (LABEL_EXPR, void_type_node, lab_false);
+ append_to_statement_list (t, pre_p);
- emit_label (lab_false);
- if (n_reg > 2)
+ if ((n_reg == 2 && reg != gpr) || n_reg > 2)
{
/* Ensure that we don't find any more args in regs.
- Alignment has taken care of the n_reg == 2 case. */
- t = build (MODIFY_EXPR, TREE_TYPE (reg), reg, build_int_2 (8, 0));
- TREE_SIDE_EFFECTS (t) = 1;
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ Alignment has taken care of the n_reg == 2 gpr case. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, size_int (8));
+ gimplify_and_add (t, pre_p);
}
}
@@ -5085,41 +6033,65 @@ rs6000_va_arg (tree valist, tree type)
t = ovf;
if (align != 1)
{
- t = build (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (align - 1, 0));
- t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-align, -1));
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (NULL_TREE, -align));
}
- t = save_expr (t);
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
- r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ u = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (u, pre_p);
- t = build (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (size, 0));
- t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
- TREE_SIDE_EFFECTS (t) = 1;
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ gimplify_and_add (t, pre_p);
if (lab_over)
- emit_label (lab_over);
+ {
+ t = build1 (LABEL_EXPR, void_type_node, lab_over);
+ append_to_statement_list (t, pre_p);
+ }
- if (indirect_p)
+ if (STRICT_ALIGNMENT
+ && (TYPE_ALIGN (type)
+ > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
{
- r = gen_rtx_MEM (Pmode, addr_rtx);
- set_mem_alias_set (r, get_varargs_alias_set ());
- emit_move_insn (addr_rtx, r);
+ /* The value (of type complex double, for example) may not be
+ aligned in memory in the saved registers, so copy via a
+ temporary. (This is the same code as used for SPARC.) */
+ tree tmp = create_tmp_var (type, "va_arg_tmp");
+ tree dest_addr = build_fold_addr_expr (tmp);
+
+ tree copy = build_function_call_expr
+ (implicit_built_in_decls[BUILT_IN_MEMCPY],
+ tree_cons (NULL_TREE, dest_addr,
+ tree_cons (NULL_TREE, addr,
+ tree_cons (NULL_TREE, size_int (rsize * 4),
+ NULL_TREE))));
+
+ gimplify_and_add (copy, pre_p);
+ addr = dest_addr;
}
- return addr_rtx;
+ addr = fold_convert (ptrtype, addr);
+ return build_va_arg_indirect_ref (addr);
}
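
Two pieces of arithmetic in the V.4 path above are worth checking in isolation: the overflow-pointer round-up (t + align - 1) & -align and the register-exhaustion test against 8 - n_reg + 1. A standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      /* Pointer rounding used for the overflow area.  */
      uintptr_t ovf = 0x1004;
      int align = 8;
      uintptr_t rounded = (ovf + align - 1) & -(uintptr_t) align;
      printf ("0x%lx\n", (unsigned long) rounded);   /* 0x1008 */

      /* Register check: an n_reg-word arg still fits only if the
         counter has not reached 8 - n_reg + 1.  */
      int n_reg = 2, gpr_counter = 7;
      printf ("%s\n", gpr_counter >= 8 - n_reg + 1
                      ? "overflow area" : "registers");   /* overflow area */
      return 0;
    }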
/* Builtins. */
-#define def_builtin(MASK, NAME, TYPE, CODE) \
-do { \
- if ((MASK) & target_flags) \
- builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
- NULL, NULL_TREE); \
-} while (0)
+static void
+def_builtin (int mask, const char *name, tree type, int code)
+{
+ if (mask & target_flags)
+ {
+ if (rs6000_builtin_decls[code])
+ abort ();
+
+ rs6000_builtin_decls[code] =
+ lang_hooks.builtin_function (name, type, code, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ }
+}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
@@ -5135,19 +6107,35 @@ static const struct builtin_description bdesc_3arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
- { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
- { MASK_ALTIVEC, CODE_FOR_altivec_vperm_4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
- { MASK_ALTIVEC, CODE_FOR_altivec_vperm_4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vperm_8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vperm_16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsel_4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsel_4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsel_8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsel_16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
};
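
The new __builtin_vec_* rows all carry CODE_FOR_nothing: these are the overloaded <altivec.h> intrinsics, resolved by the front end to a type-specific __builtin_altivec_* before expansion (reaching the expander with one still unresolved is the "unresolved overload" error added later in this patch). A usage sketch, compiled with -maltivec:

    #include <altivec.h>

    /* One generic spelling per operation: vec_madd on vector float
       resolves to the vmaddfp builtin, and vec_perm picks the variant
       matching its operands' element type.  */
    vector float
    fma_then_shuffle (vector float a, vector float b, vector float c,
                      vector unsigned char sel)
    {
      vector float t = vec_madd (a, b, c);
      return vec_perm (t, a, sel);
    }
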
/* DST operations: void foo (void *, const int, const char). */
@@ -5157,7 +6145,12 @@ static const struct builtin_description bdesc_dst[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
{ MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
{ MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
- { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT }
+ { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
};
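
Usage sketch for the dst rows. The stream-control word's bit layout (block size, count, stride) is specified by the AltiVec PEM and is not repeated here, so it is passed in opaquely; the stream number, by contrast, must be a 2-bit literal, which altivec_expand_dst_builtin enforces later in this patch.

    #include <altivec.h>

    /* ctl packs block size/count/stride per the AltiVec PEM.  */
    void
    stream_rows (const float *p, int ctl)
    {
      vec_dst (p, ctl, 0);      /* start a data-stream touch, stream 0 */
      /* ... consume the prefetched data ... */
      vec_dss (0);              /* stop stream 0 when done */
    }
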
/* Simple binary operations: VECc = foo (VECa, VECb). */
@@ -5176,7 +6169,7 @@ static struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
{ MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
- { MASK_ALTIVEC, CODE_FOR_altivec_vandc, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
+ { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
{ MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
{ MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
{ MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
@@ -5228,14 +6221,12 @@ static struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
- { MASK_ALTIVEC, CODE_FOR_altivec_vnor, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
{ MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
- { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhss, "__builtin_altivec_vpkuhss", ALTIVEC_BUILTIN_VPKUHSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
- { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwss, "__builtin_altivec_vpkuwss", ALTIVEC_BUILTIN_VPKUWSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
@@ -5252,12 +6243,12 @@ static struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
{ MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrb, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrh, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrw, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrab, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrah, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsraw, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_lshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_lshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_lshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_ashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_ashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_ashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
{ MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
@@ -5278,6 +6269,134 @@ static struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
{ MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
+
  /* Place-holder.  Leave as first SPE builtin.  */
{ 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
{ 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
@@ -5420,7 +6539,7 @@ static struct builtin_description bdesc_2arg[] =
{ 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
/* Place-holder. Leave as last binary SPE builtin. */
- { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR },
+ { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
};
/* AltiVec predicates. */
@@ -5448,7 +6567,11 @@ static const struct builtin_description_predicates bdesc_altivec_preds[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
{ MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
{ MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P }
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
+
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
};
/* SPE predicates. */
@@ -5523,6 +6646,26 @@ static struct builtin_description bdesc_1arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
{ MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
+
/* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
end with SPE_BUILTIN_EVSUBFUSIAAW. */
{ 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
@@ -5555,7 +6698,7 @@ static struct builtin_description bdesc_1arg[] =
{ 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
/* Place-holder. Leave as last unary SPE builtin. */
- { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
+ { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW }
};
static rtx
@@ -5563,7 +6706,7 @@ rs6000_expand_unop_builtin (enum insn_code icode, tree arglist, rtx target)
{
rtx pat;
tree arg0 = TREE_VALUE (arglist);
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
@@ -5583,8 +6726,8 @@ rs6000_expand_unop_builtin (enum insn_code icode, tree arglist, rtx target)
{
/* Only allow 5-bit *signed* literals. */
if (GET_CODE (op0) != CONST_INT
- || INTVAL (op0) > 0x1f
- || INTVAL (op0) < -0x1f)
+ || INTVAL (op0) > 15
+ || INTVAL (op0) < -16)
{
error ("argument 1 must be a 5-bit signed literal");
return const0_rtx;
@@ -5612,7 +6755,7 @@ altivec_expand_abs_builtin (enum insn_code icode, tree arglist, rtx target)
{
rtx pat, scratch1, scratch2;
tree arg0 = TREE_VALUE (arglist);
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
@@ -5645,8 +6788,8 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
rtx pat;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
@@ -5713,15 +6856,15 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
}
static rtx
-altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
+altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
tree arglist, rtx target)
{
rtx pat, scratch;
tree cr6_form = TREE_VALUE (arglist);
tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode tmode = SImode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
@@ -5735,8 +6878,7 @@ altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
else
cr6_form_int = TREE_INT_CST_LOW (cr6_form);
- if (mode0 != mode1)
- abort ();
+ gcc_assert (mode0 == mode1);
/* If we have invalid arguments, bail out before generating bad rtl. */
if (arg0 == error_mark_node || arg1 == error_mark_node)
@@ -5755,7 +6897,7 @@ altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
scratch = gen_reg_rtx (mode0);
pat = GEN_FCN (icode) (scratch, op0, op1,
- gen_rtx (SYMBOL_REF, Pmode, opcode));
+ gen_rtx_SYMBOL_REF (Pmode, opcode));
if (! pat)
return 0;
emit_insn (pat);
@@ -5768,24 +6910,24 @@ altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
If you think this is disgusting, look at the specs for the
AltiVec predicates. */
- switch (cr6_form_int)
- {
- case 0:
- emit_insn (gen_cr6_test_for_zero (target));
- break;
- case 1:
- emit_insn (gen_cr6_test_for_zero_reverse (target));
- break;
- case 2:
- emit_insn (gen_cr6_test_for_lt (target));
- break;
- case 3:
- emit_insn (gen_cr6_test_for_lt_reverse (target));
- break;
- default:
- error ("argument 1 of __builtin_altivec_predicate is out of range");
- break;
- }
+ switch (cr6_form_int)
+ {
+ case 0:
+ emit_insn (gen_cr6_test_for_zero (target));
+ break;
+ case 1:
+ emit_insn (gen_cr6_test_for_zero_reverse (target));
+ break;
+ case 2:
+ emit_insn (gen_cr6_test_for_lt (target));
+ break;
+ case 3:
+ emit_insn (gen_cr6_test_for_lt_reverse (target));
+ break;
+ default:
+ error ("argument 1 of __builtin_altivec_predicate is out of range");
+ break;
+ }
return target;
}
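
The cr6_form dispatch above is what lets one dot-form compare serve several source-level predicates. A usage sketch; which CR6 form each <altivec.h> wrapper passes is the header's business and is not restated here:

    #include <altivec.h>

    /* Both wrappers execute the same vcmpequw. instruction; they
       differ only in which CR6 test from the switch above inspects
       the result.  */
    int
    all_eq (vector unsigned int a, vector unsigned int b)
    {
      return vec_all_eq (a, b);
    }

    int
    any_eq (vector unsigned int a, vector unsigned int b)
    {
      return vec_any_eq (a, b);
    }
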
@@ -5799,8 +6941,8 @@ altivec_expand_lv_builtin (enum insn_code icode, tree arglist, rtx target)
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = Pmode;
enum machine_mode mode1 = Pmode;
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
if (icode == CODE_FOR_nothing)
/* Builtin not supported on this processor. */
@@ -5815,7 +6957,7 @@ altivec_expand_lv_builtin (enum insn_code icode, tree arglist, rtx target)
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
- op1 = copy_to_mode_reg (mode1, op1);
+ op1 = copy_to_mode_reg (mode1, op1);
if (op0 == const0_rtx)
{
@@ -5842,9 +6984,9 @@ spe_expand_stv_builtin (enum insn_code icode, tree arglist)
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
rtx pat;
enum machine_mode mode0 = insn_data[icode].operand[0].mode;
enum machine_mode mode1 = insn_data[icode].operand[1].mode;
@@ -5875,9 +7017,9 @@ altivec_expand_stv_builtin (enum insn_code icode, tree arglist)
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
rtx pat, addr;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode1 = Pmode;
@@ -5892,7 +7034,7 @@ altivec_expand_stv_builtin (enum insn_code icode, tree arglist)
if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
op0 = copy_to_mode_reg (tmode, op0);
- op2 = copy_to_mode_reg (mode2, op2);
+ op2 = copy_to_mode_reg (mode2, op2);
if (op1 == const0_rtx)
{
@@ -5917,9 +7059,9 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree arglist, rtx target)
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
@@ -5935,12 +7077,13 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree arglist, rtx target)
|| arg2 == error_mark_node)
return const0_rtx;
- if (icode == CODE_FOR_altivec_vsldoi_4sf
- || icode == CODE_FOR_altivec_vsldoi_4si
- || icode == CODE_FOR_altivec_vsldoi_8hi
- || icode == CODE_FOR_altivec_vsldoi_16qi)
+ if (icode == CODE_FOR_altivec_vsldoi_v4sf
+ || icode == CODE_FOR_altivec_vsldoi_v4si
+ || icode == CODE_FOR_altivec_vsldoi_v8hi
+ || icode == CODE_FOR_altivec_vsldoi_v16qi)
{
/* Only allow 4-bit unsigned literals. */
+ STRIP_NOPS (arg2);
if (TREE_CODE (arg2) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2) & ~0xf)
{
@@ -5984,16 +7127,16 @@ altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
switch (fcode)
{
case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
- icode = CODE_FOR_altivec_lvx_16qi;
+ icode = CODE_FOR_altivec_lvx_v16qi;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
- icode = CODE_FOR_altivec_lvx_8hi;
+ icode = CODE_FOR_altivec_lvx_v8hi;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
- icode = CODE_FOR_altivec_lvx_4si;
+ icode = CODE_FOR_altivec_lvx_v4si;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
- icode = CODE_FOR_altivec_lvx_4sf;
+ icode = CODE_FOR_altivec_lvx_v4sf;
break;
default:
*expandedp = false;
@@ -6003,7 +7146,7 @@ altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
*expandedp = true;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
tmode = insn_data[icode].operand[0].mode;
mode0 = insn_data[icode].operand[1].mode;
@@ -6024,7 +7167,7 @@ altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
/* Expand the stvx builtins. */
static rtx
-altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -6038,16 +7181,16 @@ altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
switch (fcode)
{
case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
- icode = CODE_FOR_altivec_stvx_16qi;
+ icode = CODE_FOR_altivec_stvx_v16qi;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
- icode = CODE_FOR_altivec_stvx_8hi;
+ icode = CODE_FOR_altivec_stvx_v8hi;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
- icode = CODE_FOR_altivec_stvx_4si;
+ icode = CODE_FOR_altivec_stvx_v4si;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
- icode = CODE_FOR_altivec_stvx_4sf;
+ icode = CODE_FOR_altivec_stvx_v4sf;
break;
default:
*expandedp = false;
@@ -6056,8 +7199,8 @@ altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
mode0 = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
@@ -6076,7 +7219,7 @@ altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
/* Expand the dst builtins. */
static rtx
-altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -6098,9 +7241,9 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
@@ -6116,12 +7259,12 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
if (TREE_CODE (arg2) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2) & ~0x3)
{
- error ("argument to `%s' must be a 2-bit unsigned literal", d->name);
+ error ("argument to %qs must be a 2-bit unsigned literal", d->name);
return const0_rtx;
}
if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
- op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ op0 = copy_to_mode_reg (Pmode, op0);
if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
op1 = copy_to_mode_reg (mode1, op1);
@@ -6135,6 +7278,111 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
return NULL_RTX;
}
+/* Expand vec_init builtin. */
+static rtx
+altivec_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+{
+ enum machine_mode tmode = TYPE_MODE (type);
+ enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+ int i, n_elt = GET_MODE_NUNITS (tmode);
+ rtvec v = rtvec_alloc (n_elt);
+
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+ {
+ rtx x = expand_normal (TREE_VALUE (arglist));
+ RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+ }
+
+ gcc_assert (arglist == NULL);
+
+ if (!target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
+ return target;
+}
+
+/* Return the integer constant in ARG. Constrain it to be in the range
+ of the subparts of VEC_TYPE; issue an error if not. */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+ unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+ if (!host_integerp (arg, 1)
+ || (elt = tree_low_cst (arg, 1), elt > max))
+ {
+ error ("selector must be an integer constant in the range 0..%wi", max);
+ return 0;
+ }
+
+ return elt;
+}
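
Effect of the range check, sketched against the vec_ext entry points whose enum values (ALTIVEC_BUILTIN_VEC_EXT_*) appear later in this patch; the __builtin_vec_ext_v4si spelling is assumed from those names. For V4SI the valid selectors are 0..3.

    /* Compile with -maltivec.  */
    vector int v = { 10, 20, 30, 40 };

    int
    third_element (void)
    {
      return __builtin_vec_ext_v4si (v, 2);   /* accepted: 2 <= 3 */
    }

    int
    bad_element (void)
    {
      return __builtin_vec_ext_v4si (v, 9);   /* draws the "selector
                                                 must be an integer
                                                 constant" error */
    }
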
+
+/* Expand vec_set builtin. */
+static rtx
+altivec_expand_vec_set_builtin (tree arglist)
+{
+ enum machine_mode tmode, mode1;
+ tree arg0, arg1, arg2;
+ int elt;
+ rtx op0, op1;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ tmode = TYPE_MODE (TREE_TYPE (arg0));
+ mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+ if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+ op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+ op0 = force_reg (tmode, op0);
+ op1 = force_reg (mode1, op1);
+
+ rs6000_expand_vector_set (op0, op1, elt);
+
+ return op0;
+}
+
+/* Expand vec_ext builtin. */
+static rtx
+altivec_expand_vec_ext_builtin (tree arglist, rtx target)
+{
+ enum machine_mode tmode, mode0;
+ tree arg0, arg1;
+ int elt;
+ rtx op0;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+ op0 = expand_normal (arg0);
+ elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+ tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ mode0 = TYPE_MODE (TREE_TYPE (arg0));
+ gcc_assert (VECTOR_MODE_P (mode0));
+
+ op0 = force_reg (mode0, op0);
+
+ if (optimize || !target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_extract (target, op0, elt);
+
+ return target;
+}
+
/* Expand the builtin in EXP and store the result in TARGET. Store
true in *EXPANDEDP if we found a builtin to expand. */
static rtx
@@ -6151,6 +7399,14 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
enum machine_mode tmode, mode0;
unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ {
+ *expandedp = true;
+ error ("unresolved overload for Altivec builtin %qF", fndecl);
+ return const0_rtx;
+ }
+
target = altivec_expand_ld_builtin (exp, target, expandedp);
if (*expandedp)
return target;
@@ -6186,7 +7442,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
@@ -6196,7 +7452,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
case ALTIVEC_BUILTIN_MTVSCR:
icode = CODE_FOR_altivec_mtvscr;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
/* If we got invalid arguments bail out before generating bad rtl. */
@@ -6219,7 +7475,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
icode = CODE_FOR_altivec_dss;
arg0 = TREE_VALUE (arglist);
STRIP_NOPS (arg0);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
/* If we got invalid arguments bail out before generating bad rtl. */
@@ -6239,14 +7495,27 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
emit_insn (gen_altivec_dss (op0));
return NULL_RTX;
- case ALTIVEC_BUILTIN_COMPILETIME_ERROR:
- arg0 = TREE_VALUE (arglist);
- while (TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == ADDR_EXPR)
- arg0 = TREE_OPERAND (arg0, 0);
- error ("invalid parameter combination for `%s' AltiVec intrinsic",
- TREE_STRING_POINTER (arg0));
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
+ return altivec_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
- return const0_rtx;
+ case ALTIVEC_BUILTIN_VEC_SET_V4SI:
+ case ALTIVEC_BUILTIN_VEC_SET_V8HI:
+ case ALTIVEC_BUILTIN_VEC_SET_V16QI:
+ case ALTIVEC_BUILTIN_VEC_SET_V4SF:
+ return altivec_expand_vec_set_builtin (arglist);
+
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
+ return altivec_expand_vec_ext_builtin (arglist, target);
+
+ default:
+ break;
+ /* Fall through. */
}
/* Expand abs* operations. */
@@ -6259,32 +7528,33 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
if (dp->code == fcode)
- return altivec_expand_predicate_builtin (dp->icode, dp->opcode, arglist, target);
+ return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
+ arglist, target);
/* LV* are funky. We initialized them differently. */
switch (fcode)
{
case ALTIVEC_BUILTIN_LVSL:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVSR:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVEBX:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVEHX:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVEWX:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVXL:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
- arglist, target);
+ arglist, target);
case ALTIVEC_BUILTIN_LVX:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
- arglist, target);
+ arglist, target);
default:
break;
/* Fall through. */
@@ -6430,7 +7700,7 @@ spe_expand_builtin (tree exp, rtx target, bool *expandedp)
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
@@ -6439,7 +7709,7 @@ spe_expand_builtin (tree exp, rtx target, bool *expandedp)
case SPE_BUILTIN_MTSPEFSCR:
icode = CODE_FOR_spe_mtspefscr;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
if (arg0 == error_mark_node)
@@ -6467,8 +7737,8 @@ spe_expand_predicate_builtin (enum insn_code icode, tree arglist, rtx target)
tree form = TREE_VALUE (arglist);
tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
int form_int;
@@ -6482,8 +7752,7 @@ spe_expand_predicate_builtin (enum insn_code icode, tree arglist, rtx target)
else
form_int = TREE_INT_CST_LOW (form);
- if (mode0 != mode1)
- abort ();
+ gcc_assert (mode0 == mode1);
if (arg0 == error_mark_node || arg1 == error_mark_node)
return const0_rtx;
@@ -6532,7 +7801,7 @@ spe_expand_predicate_builtin (enum insn_code icode, tree arglist, rtx target)
case 0:
/* We need to get to the OV bit, which is the ORDERED bit. We
could generate (ordered:SI (reg:CC xx) (const_int 0)), but
- that's ugly and will trigger a validate_condition_mode abort.
+ that's ugly and will make validate_condition_mode die.
So let's just use another pattern. */
emit_insn (gen_move_from_CR_ov_bit (target, scratch));
return target;
@@ -6577,15 +7846,14 @@ spe_expand_evsel_builtin (enum insn_code icode, tree arglist, rtx target)
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
tree arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
- rtx op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = expand_normal (arg3);
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
- if (mode0 != mode1)
- abort ();
+ gcc_assert (mode0 == mode1);
if (arg0 == error_mark_node || arg1 == error_mark_node
|| arg2 == error_mark_node || arg3 == error_mark_node)
@@ -6628,8 +7896,8 @@ spe_expand_evsel_builtin (enum insn_code icode, tree arglist, rtx target)
static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- int ignore ATTRIBUTE_UNUSED)
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
tree arglist = TREE_OPERAND (exp, 1);
@@ -6638,7 +7906,47 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
size_t i;
rtx ret;
bool success;
-
+
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
+ || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ {
+ int icode = (int) CODE_FOR_altivec_lvsr;
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode = insn_data[icode].operand[1].mode;
+ tree arg;
+ rtx op, addr, pat;
+
+ gcc_assert (TARGET_ALTIVEC);
+
+ arg = TREE_VALUE (arglist);
+ gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
+ op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
+ addr = memory_address (mode, op);
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ op = addr;
+ else
+ {
+	  /* For the load case we need to negate the address.  */
+ op = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, op,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ }
+ op = gen_rtx_MEM (mode, op);
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ /*pat = gen_altivec_lvsr (target, op);*/
+ pat = GEN_FCN (icode) (target, op);
+ if (!pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+ }
+
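
The mask computed above feeds the vectorizer's realignment scheme; hand-written AltiVec code performs the same dance. A sketch of the classic unaligned-load idiom, using lvsl (the builtin above obtains an equivalent permute control from lvsr on the negated address):

    #include <altivec.h>

    /* vec_ld ignores the low address bits, so two aligned loads
       bracket the unaligned data and vec_perm stitches the wanted
       16 bytes back together.  */
    vector unsigned char
    load_misaligned (const unsigned char *p)
    {
      vector unsigned char lo   = vec_ld (0, p);
      vector unsigned char hi   = vec_ld (16, p);
      vector unsigned char mask = vec_lvsl (0, p);
      return vec_perm (lo, hi, mask);
    }
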
if (TARGET_ALTIVEC)
{
ret = altivec_expand_builtin (exp, target, &success);
@@ -6654,49 +7962,76 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
return ret;
}
- if (TARGET_ALTIVEC || TARGET_SPE)
- {
- /* Handle simple unary operations. */
- d = (struct builtin_description *) bdesc_1arg;
- for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
- if (d->code == fcode)
- return rs6000_expand_unop_builtin (d->icode, arglist, target);
+ gcc_assert (TARGET_ALTIVEC || TARGET_SPE);
- /* Handle simple binary operations. */
- d = (struct builtin_description *) bdesc_2arg;
- for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
- if (d->code == fcode)
- return rs6000_expand_binop_builtin (d->icode, arglist, target);
+ /* Handle simple unary operations. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_unop_builtin (d->icode, arglist, target);
- /* Handle simple ternary operations. */
- d = (struct builtin_description *) bdesc_3arg;
- for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
- if (d->code == fcode)
- return rs6000_expand_ternop_builtin (d->icode, arglist, target);
- }
+ /* Handle simple binary operations. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_binop_builtin (d->icode, arglist, target);
- abort ();
- return NULL_RTX;
+ /* Handle simple ternary operations. */
+ d = (struct builtin_description *) bdesc_3arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_ternop_builtin (d->icode, arglist, target);
+
+ gcc_unreachable ();
+}
+
+static tree
+build_opaque_vector_type (tree node, int nunits)
+{
+ node = copy_node (node);
+ TYPE_MAIN_VARIANT (node) = node;
+ return build_vector_type (node, nunits);
}
static void
rs6000_init_builtins (void)
{
- opaque_V2SI_type_node = copy_node (V2SI_type_node);
- opaque_V2SF_type_node = copy_node (V2SF_type_node);
+ V2SI_type_node = build_vector_type (intSI_type_node, 2);
+ V2SF_type_node = build_vector_type (float_type_node, 2);
+ V4HI_type_node = build_vector_type (intHI_type_node, 4);
+ V4SI_type_node = build_vector_type (intSI_type_node, 4);
+ V4SF_type_node = build_vector_type (float_type_node, 4);
+ V8HI_type_node = build_vector_type (intHI_type_node, 8);
+ V16QI_type_node = build_vector_type (intQI_type_node, 16);
+
+ unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
+ unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
+ unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
+
+ opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
+ opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
+ opaque_V4SI_type_node = copy_node (V4SI_type_node);
/* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
-     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
-     'vector unsigned short'.  */
-
- bool_char_type_node = copy_node (unsigned_intQI_type_node);
- TYPE_MAIN_VARIANT (bool_char_type_node) = bool_char_type_node;
- bool_short_type_node = copy_node (unsigned_intHI_type_node);
- TYPE_MAIN_VARIANT (bool_short_type_node) = bool_short_type_node;
- bool_int_type_node = copy_node (unsigned_intSI_type_node);
- TYPE_MAIN_VARIANT (bool_int_type_node) = bool_int_type_node;
- pixel_type_node = copy_node (unsigned_intHI_type_node);
- TYPE_MAIN_VARIANT (pixel_type_node) = pixel_type_node;
+ types, especially in C++ land. Similarly, 'vector pixel' is distinct from
+ 'vector unsigned short'. */
+
+ bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
+ bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+ bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
+ pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+
+ long_integer_type_internal_node = long_integer_type_node;
+ long_unsigned_type_internal_node = long_unsigned_type_node;
+ intQI_type_internal_node = intQI_type_node;
+ uintQI_type_internal_node = unsigned_intQI_type_node;
+ intHI_type_internal_node = intHI_type_node;
+ uintHI_type_internal_node = unsigned_intHI_type_node;
+ intSI_type_internal_node = intSI_type_node;
+ uintSI_type_internal_node = unsigned_intSI_type_node;
+ float_type_internal_node = float_type_node;
+ void_type_internal_node = void_type_node;
(*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
get_identifier ("__bool char"),
@@ -6711,10 +8046,10 @@ rs6000_init_builtins (void)
get_identifier ("__pixel"),
pixel_type_node));
- bool_V16QI_type_node = make_vector (V16QImode, bool_char_type_node, 1);
- bool_V8HI_type_node = make_vector (V8HImode, bool_short_type_node, 1);
- bool_V4SI_type_node = make_vector (V4SImode, bool_int_type_node, 1);
- pixel_V8HI_type_node = make_vector (V8HImode, pixel_type_node, 1);
+ bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
+ bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
+ bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
+ pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
(*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
get_identifier ("__vector unsigned char"),
@@ -6759,6 +8094,12 @@ rs6000_init_builtins (void)
altivec_init_builtins ();
if (TARGET_ALTIVEC || TARGET_SPE)
rs6000_common_init_builtins ();
+
+#if TARGET_XCOFF
+ /* AIX libm provides clog as __clog. */
+ if (built_in_decls [BUILT_IN_CLOG])
+ set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
+#endif
}
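
The set_user_assembler_name call above only renames the symbol the compiler emits; the source-level spelling is untouched. A sketch:

    #include <complex.h>

    /* On AIX (TARGET_XCOFF) this call is emitted against the __clog
       symbol, matching what libm there actually exports; on other
       targets it remains clog.  */
    double _Complex
    log_of (double _Complex z)
    {
      return clog (z);
    }
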
/* Search through a set of builtins and enable the mask bits.
@@ -6768,7 +8109,7 @@ rs6000_init_builtins (void)
END is the builtin enum at which to end. */
static void
enable_mask_for_builtins (struct builtin_description *desc, int size,
- enum rs6000_builtins start,
+ enum rs6000_builtins start,
enum rs6000_builtins end)
{
int i;
@@ -6921,7 +8262,7 @@ spe_init_builtins (void)
opaque_V2SI_type_node));
/* Initialize irregular SPE builtins. */
-
+
def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
@@ -6980,7 +8321,7 @@ spe_init_builtins (void)
type = int_ftype_int_v2sf_v2sf;
break;
default:
- abort ();
+ gcc_unreachable ();
}
def_builtin (d->mask, d->name, type, d->code);
@@ -7001,7 +8342,7 @@ spe_init_builtins (void)
type = v2sf_ftype_4_v2sf;
break;
default:
- abort ();
+ gcc_unreachable ();
}
def_builtin (d->mask, d->name, type, d->code);
@@ -7014,6 +8355,8 @@ altivec_init_builtins (void)
struct builtin_description *d;
struct builtin_description_predicates *dp;
size_t i;
+ tree ftype;
+
tree pfloat_type_node = build_pointer_type (float_type_node);
tree pint_type_node = build_pointer_type (integer_type_node);
tree pshort_type_node = build_pointer_type (short_integer_type_node);
@@ -7028,6 +8371,21 @@ altivec_init_builtins (void)
tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
+ tree int_ftype_opaque
+ = build_function_type_list (integer_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+
+ tree opaque_ftype_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, integer_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_int_opaque_opaque
+ = build_function_type_list (integer_type_node,
+ integer_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
tree int_ftype_int_v4si_v4si
= build_function_type_list (integer_type_node,
integer_type_node, V4SI_type_node,
@@ -7061,6 +8419,9 @@ altivec_init_builtins (void)
tree void_ftype_int
= build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
+ tree opaque_ftype_long_pcvoid
+ = build_function_type_list (opaque_V4SI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
tree v16qi_ftype_long_pcvoid
= build_function_type_list (V16QI_type_node,
long_integer_type_node, pcvoid_type_node, NULL_TREE);
@@ -7071,6 +8432,10 @@ altivec_init_builtins (void)
= build_function_type_list (V4SI_type_node,
long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree void_ftype_opaque_long_pvoid
+ = build_function_type_list (void_type_node,
+ opaque_V4SI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
tree void_ftype_v4si_long_pvoid
= build_function_type_list (void_type_node,
V4SI_type_node, long_integer_type_node,
@@ -7107,10 +8472,7 @@ altivec_init_builtins (void)
= build_function_type_list (void_type_node,
pcvoid_type_node, integer_type_node,
integer_type_node, NULL_TREE);
- tree int_ftype_pcchar
- = build_function_type_list (integer_type_node,
- pcchar_type_node, NULL_TREE);
-
+
def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
@@ -7143,10 +8505,33 @@ altivec_init_builtins (void)
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
-
- /* See altivec.h for usage of "__builtin_altivec_compiletime_error". */
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_compiletime_error", int_ftype_pcchar,
- ALTIVEC_BUILTIN_COMPILETIME_ERROR);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
/* Add the DST variants. */
d = (struct builtin_description *) bdesc_dst;
@@ -7159,11 +8544,19 @@ altivec_init_builtins (void)
{
enum machine_mode mode1;
tree type;
+ bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
- mode1 = insn_data[dp->icode].operand[1].mode;
+ if (is_overloaded)
+ mode1 = VOIDmode;
+ else
+ mode1 = insn_data[dp->icode].operand[1].mode;
switch (mode1)
{
+ case VOIDmode:
+ type = int_ftype_int_opaque_opaque;
+ break;
case V4SImode:
type = int_ftype_int_v4si_v4si;
break;
@@ -7177,9 +8570,9 @@ altivec_init_builtins (void)
type = int_ftype_int_v4sf_v4sf;
break;
default:
- abort ();
+ gcc_unreachable ();
}
-
+
def_builtin (dp->mask, dp->name, type, dp->code);
}
@@ -7207,11 +8600,110 @@ altivec_init_builtins (void)
type = v4sf_ftype_v4sf;
break;
default:
- abort ();
+ gcc_unreachable ();
}
-
+
def_builtin (d->mask, d->name, type, d->code);
}
+
+ if (TARGET_ALTIVEC)
+ {
+ tree decl;
+
+ /* Initialize target builtin that implements
+ targetm.vectorize.builtin_mask_for_load. */
+
+ decl = lang_hooks.builtin_function ("__builtin_altivec_mask_for_load",
+ v16qi_ftype_long_pcvoid,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ BUILT_IN_MD, NULL,
+ tree_cons (get_identifier ("const"),
+ NULL_TREE, NULL_TREE));
+ /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
+ altivec_builtin_mask_for_load = decl;
+ }
+
+ /* Access to the vec_init patterns. */
+ ftype = build_function_type_list (V4SI_type_node, integer_type_node,
+ integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, float_type_node,
+ float_type_node, float_type_node,
+ float_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF);
+
+ /* Access to the vec_set patterns. */
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ intSI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI);
+
+ ftype = build_function_type_list (V8HI_type_node, V16QI_type_node,
+ intQI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
+ float_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF);
+
+ /* Access to the vec_extract patterns. */
+ ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI);
+
+ ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI);
+
+ ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI);
+
+ ftype = build_function_type_list (float_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF);
}
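The vec_init, vec_set and vec_ext builtins registered above give the middle end element-wise construction and access for each vector mode. A sketch of the source construct they back, assuming -maltivec; the GNU vector_size typedef is one way to reach the vec_init_v4si pattern:

typedef int v4si __attribute__ ((vector_size (16)));

/* A constructor with variable elements is expanded through
   __builtin_vec_init_v4si; with all-constant elements it can fold
   to a vector literal instead.  */
v4si
build_v4si (int a, int b, int c, int d)
{
  v4si v = { a, b, c, d };
  return v;
}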
static void
@@ -7262,6 +8754,10 @@ rs6000_common_init_builtins (void)
integer_type_node, integer_type_node,
NULL_TREE);
+ tree opaque_ftype_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+
tree v2si_ftype_v2si
= build_function_type_list (opaque_V2SI_type_node,
opaque_V2SI_type_node, NULL_TREE);
@@ -7269,7 +8765,7 @@ rs6000_common_init_builtins (void)
tree v2sf_ftype_v2sf
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SF_type_node, NULL_TREE);
-
+
tree v2sf_ftype_v2si
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SI_type_node, NULL_TREE);
@@ -7296,6 +8792,9 @@ rs6000_common_init_builtins (void)
integer_type_node, integer_type_node,
NULL_TREE);
+ tree opaque_ftype_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node, NULL_TREE);
tree v4si_ftype_v4si_v4si
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node, NULL_TREE);
@@ -7333,6 +8832,10 @@ rs6000_common_init_builtins (void)
tree v4sf_ftype_v4sf_v4sf
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
tree v4sf_ftype_v4sf_v4sf_v4si
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node,
@@ -7341,7 +8844,7 @@ rs6000_common_init_builtins (void)
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node,
V4SF_type_node, NULL_TREE);
- tree v4si_ftype_v4si_v4si_v4si
+ tree v4si_ftype_v4si_v4si_v4si
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node,
V4SI_type_node, NULL_TREE);
@@ -7352,11 +8855,11 @@ rs6000_common_init_builtins (void)
= build_function_type_list (V8HI_type_node,
V8HI_type_node, V8HI_type_node,
V8HI_type_node, NULL_TREE);
- tree v4si_ftype_v8hi_v8hi_v4si
+ tree v4si_ftype_v8hi_v8hi_v4si
= build_function_type_list (V4SI_type_node,
V8HI_type_node, V8HI_type_node,
V4SI_type_node, NULL_TREE);
- tree v4si_ftype_v16qi_v16qi_v4si
+ tree v4si_ftype_v16qi_v16qi_v4si
= build_function_type_list (V4SI_type_node,
V16QI_type_node, V16QI_type_node,
V4SI_type_node, NULL_TREE);
@@ -7406,23 +8909,37 @@ rs6000_common_init_builtins (void)
d = (struct builtin_description *) bdesc_3arg;
for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
{
-
enum machine_mode mode0, mode1, mode2, mode3;
tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ mode3 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ mode3 = insn_data[d->icode].operand[3].mode;
+ }
- if (d->name == 0 || d->icode == CODE_FOR_nothing)
- continue;
-
- mode0 = insn_data[d->icode].operand[0].mode;
- mode1 = insn_data[d->icode].operand[1].mode;
- mode2 = insn_data[d->icode].operand[2].mode;
- mode3 = insn_data[d->icode].operand[3].mode;
-
/* When all four are of the same mode. */
if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
{
switch (mode0)
{
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque_opaque;
+ break;
case V4SImode:
type = v4si_ftype_v4si_v4si_v4si;
break;
@@ -7431,16 +8948,16 @@ rs6000_common_init_builtins (void)
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v8hi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ gcc_unreachable ();
}
}
else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
- {
+ {
switch (mode0)
{
case V4SImode:
@@ -7451,21 +8968,21 @@ rs6000_common_init_builtins (void)
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v16qi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ gcc_unreachable ();
}
}
- else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
&& mode3 == V4SImode)
type = v4si_ftype_v16qi_v16qi_v4si;
- else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
&& mode3 == V4SImode)
type = v4si_ftype_v8hi_v8hi_v4si;
- else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
&& mode3 == V4SImode)
type = v4sf_ftype_v4sf_v4sf_v4si;
@@ -7490,7 +9007,7 @@ rs6000_common_init_builtins (void)
type = v4sf_ftype_v4sf_v4sf_int;
else
- abort ();
+ gcc_unreachable ();
def_builtin (d->mask, d->name, type, d->code);
}
@@ -7501,19 +9018,33 @@ rs6000_common_init_builtins (void)
{
enum machine_mode mode0, mode1, mode2;
tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
- if (d->name == 0 || d->icode == CODE_FOR_nothing)
- continue;
-
- mode0 = insn_data[d->icode].operand[0].mode;
- mode1 = insn_data[d->icode].operand[1].mode;
- mode2 = insn_data[d->icode].operand[2].mode;
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ }
/* When all three operands are of the same mode. */
if (mode0 == mode1 && mode1 == mode2)
{
switch (mode0)
{
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque;
+ break;
case V4SFmode:
type = v4sf_ftype_v4sf_v4sf;
break;
@@ -7536,7 +9067,7 @@ rs6000_common_init_builtins (void)
type = int_ftype_int_int;
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -7573,15 +9104,15 @@ rs6000_common_init_builtins (void)
/* vint, vshort, vint. */
else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
type = v4si_ftype_v8hi_v4si;
-
+
/* vint, vint, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
type = v4si_ftype_v4si_int;
-
+
/* vshort, vshort, 5 bit literal. */
else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
type = v8hi_ftype_v8hi_int;
-
+
/* vchar, vchar, 5 bit literal. */
else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
type = v16qi_ftype_v16qi_int;
@@ -7589,7 +9120,7 @@ rs6000_common_init_builtins (void)
/* vfloat, vint, 5 bit literal. */
else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
type = v4sf_ftype_v4si_int;
-
+
/* vint, vfloat, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
type = v4si_ftype_v4sf_int;
@@ -7603,9 +9134,10 @@ rs6000_common_init_builtins (void)
else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
type = v2si_ftype_int_char;
- /* int, x, x. */
- else if (mode0 == SImode)
+ else
{
+ /* int, x, x. */
+ gcc_assert (mode0 == SImode);
switch (mode1)
{
case V4SImode:
@@ -7621,13 +9153,10 @@ rs6000_common_init_builtins (void)
type = int_ftype_v8hi_v8hi;
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
- else
- abort ();
-
def_builtin (d->mask, d->name, type, d->code);
}
@@ -7637,19 +9166,31 @@ rs6000_common_init_builtins (void)
{
enum machine_mode mode0, mode1;
tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
- if (d->name == 0 || d->icode == CODE_FOR_nothing)
- continue;
-
- mode0 = insn_data[d->icode].operand[0].mode;
- mode1 = insn_data[d->icode].operand[1].mode;
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ }
if (mode0 == V4SImode && mode1 == QImode)
- type = v4si_ftype_int;
+ type = v4si_ftype_int;
else if (mode0 == V8HImode && mode1 == QImode)
- type = v8hi_ftype_int;
+ type = v8hi_ftype_int;
else if (mode0 == V16QImode && mode1 == QImode)
- type = v16qi_ftype_int;
+ type = v16qi_ftype_int;
+ else if (mode0 == VOIDmode && mode1 == VOIDmode)
+ type = opaque_ftype_opaque;
else if (mode0 == V4SFmode && mode1 == V4SFmode)
type = v4sf_ftype_v4sf;
else if (mode0 == V8HImode && mode1 == V16QImode)
@@ -7667,7 +9208,7 @@ rs6000_common_init_builtins (void)
else if (mode0 == V2SImode && mode1 == QImode)
type = v2si_ftype_char;
else
- abort ();
+ gcc_unreachable ();
def_builtin (d->mask, d->name, type, d->code);
}
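All three registration loops in rs6000_common_init_builtins (3-, 2- and 1-operand) now gate on the same range test before touching insn_data, because overloaded codes have no insn of their own: they get VOIDmode operands and an opaque_* type here, and the front end later resolves them to a concrete __builtin_altivec_* entry. The test, distilled into a stand-alone sketch; the enumerator values below are stand-ins, the real ones live in rs6000.h:

/* Stand-in values for illustration only.  */
enum { ALTIVEC_BUILTIN_OVERLOADED_FIRST = 1000,
       ALTIVEC_BUILTIN_OVERLOADED_LAST  = 1100 };

static int
altivec_overloaded_builtin_p (int code)
{
  return code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
         && code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
}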
@@ -7676,36 +9217,53 @@ rs6000_common_init_builtins (void)
static void
rs6000_init_libfuncs (void)
{
- if (!TARGET_HARD_FLOAT)
- return;
-
- if (DEFAULT_ABI != ABI_V4)
+ if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
+ && !TARGET_POWER2 && !TARGET_POWERPC)
{
- if (TARGET_XCOFF && ! TARGET_POWER2 && ! TARGET_POWERPC)
- {
- /* AIX library routines for float->int conversion. */
- set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
- set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
- set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
- set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
- }
+ /* AIX library routines for float->int conversion. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
+ }
+ if (!TARGET_IEEEQUAD)
/* AIX/Darwin/64-bit Linux quad floating point routines. */
- if (!TARGET_XL_COMPAT)
- {
- set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
- set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
- set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
- set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
- }
- else
- {
- set_optab_libfunc (add_optab, TFmode, "_xlqadd");
- set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
- set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
- set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
- }
- }
+ if (!TARGET_XL_COMPAT)
+ {
+ set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
+ set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
+ set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
+
+ if (TARGET_SOFT_FLOAT)
+ {
+ set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
+ set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
+ set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
+ set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
+ set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
+ set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
+ set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
+ set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
+ set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
+ }
+ }
+ else
+ {
+ set_optab_libfunc (add_optab, TFmode, "_xlqadd");
+ set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
+ set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ }
else
{
/* 32-bit SVR4 quad floating point routines. */
@@ -7732,8 +9290,102 @@ rs6000_init_libfuncs (void)
set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
+ }
+}
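With the new soft-float registrations, IBM double-double long double arithmetic lowers to the __gcc_q* routines rather than generic soft-fp names. A small illustration of what now becomes a libcall, assuming -msoft-float and the IBM extended format; the function name is invented:

long double
qadd_demo (long double a, long double b)
{
  return a + b;   /* emitted as a call to __gcc_qadd */
}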
+
+
+/* Expand a block clear operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code.
+
+ operands[0] is the destination
+ operands[1] is the length
+ operands[3] is the alignment */
+
+int
+expand_block_clear (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[3];
+ bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ HOST_WIDE_INT align;
+ HOST_WIDE_INT bytes;
+ int offset;
+ int clear_bytes;
+ int clear_step;
+
+ /* If this is not a fixed size clear, just call memset. */
+ if (! constp)
+ return 0;
+
+ /* This must be a fixed size alignment. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to clear? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* Use the builtin memset after a point, to avoid huge code bloat.
+ When optimize_size, avoid any significant code bloat; calling
+ memset is about 4 instructions, so allow for one instruction to
+ load zero and three to do clearing. */
+ if (TARGET_ALTIVEC && align >= 128)
+ clear_step = 16;
+ else if (TARGET_POWERPC64 && align >= 32)
+ clear_step = 8;
+ else
+ clear_step = 4;
+
+ if (optimize_size && bytes > 3 * clear_step)
+ return 0;
+ if (! optimize_size && bytes > 8 * clear_step)
+ return 0;
+
+ for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
+ {
+ enum machine_mode mode = BLKmode;
+ rtx dest;
+
+ if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
+ {
+ clear_bytes = 16;
+ mode = V4SImode;
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ clear_bytes = 8;
+ mode = DImode;
+ }
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
+ { /* move 4 bytes */
+ clear_bytes = 4;
+ mode = SImode;
+ }
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ { /* move 2 bytes */
+ clear_bytes = 2;
+ mode = HImode;
+ }
+ else /* move 1 byte at a time */
+ {
+ clear_bytes = 1;
+ mode = QImode;
+ }
+
+ dest = adjust_address (orig_dest, mode, offset);
+
+ emit_move_insn (dest, CONST0_RTX (mode));
}
+
+ return 1;
}
+
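expand_block_clear walks the buffer choosing the widest store the remaining length and alignment allow; note that align is in bits once scaled by BITS_PER_UNIT. The cascade restated as a stand-alone sketch, assuming !STRICT_ALIGNMENT so the half-aligned fallbacks apply; the helper name is invented:

/* Bytes the next store should clear; align_bits = byte alignment * 8.  */
static int
next_clear_chunk (long bytes, long align_bits, int have_altivec, int is64)
{
  if (have_altivec && bytes >= 16 && align_bits >= 128)
    return 16;                 /* one V4SImode store */
  if (is64 && bytes >= 8 && align_bits >= 32)
    return 8;                  /* one DImode store */
  if (bytes >= 4)
    return 4;
  if (bytes >= 2)
    return 2;
  return 1;
}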
/* Expand a block move operation, and return 1 if successful. Return 0
if we should let the compiler generate normal code.
@@ -7764,10 +9416,9 @@ expand_block_move (rtx operands[])
if (! constp)
return 0;
- /* If this is not a fixed size alignment, abort */
- if (GET_CODE (align_rtx) != CONST_INT)
- abort ();
- align = INTVAL (align_rtx);
+ /* This must be a fixed size alignment. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
/* Anything to move? */
bytes = INTVAL (bytes_rtx);
@@ -7775,20 +9426,28 @@ expand_block_move (rtx operands[])
return 1;
/* store_one_arg depends on expand_block_move to handle at least the size of
- reg_parm_stack_space. */
+ reg_parm_stack_space. */
if (bytes > (TARGET_POWERPC64 ? 64 : 32))
return 0;
for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
{
union {
- rtx (*movstrsi) (rtx, rtx, rtx, rtx);
+ rtx (*movmemsi) (rtx, rtx, rtx, rtx);
rtx (*mov) (rtx, rtx);
} gen_func;
enum machine_mode mode = BLKmode;
rtx src, dest;
-
- if (TARGET_STRING
+
+ /* Altivec first, since it will be faster than a string move
+ when it applies, and usually not significantly larger. */
+ if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
+ {
+ move_bytes = 16;
+ mode = V4SImode;
+ gen_func.mov = gen_movv4si;
+ }
+ else if (TARGET_STRING
&& bytes > 24 /* move up to 32 bytes at a time */
&& ! fixed_regs[5]
&& ! fixed_regs[6]
@@ -7800,7 +9459,7 @@ expand_block_move (rtx operands[])
&& ! fixed_regs[12])
{
move_bytes = (bytes > 32) ? 32 : bytes;
- gen_func.movstrsi = gen_movstrsi_8reg;
+ gen_func.movmemsi = gen_movmemsi_8reg;
}
else if (TARGET_STRING
&& bytes > 16 /* move up to 24 bytes at a time */
@@ -7812,7 +9471,7 @@ expand_block_move (rtx operands[])
&& ! fixed_regs[10])
{
move_bytes = (bytes > 24) ? 24 : bytes;
- gen_func.movstrsi = gen_movstrsi_6reg;
+ gen_func.movmemsi = gen_movmemsi_6reg;
}
else if (TARGET_STRING
&& bytes > 8 /* move up to 16 bytes at a time */
@@ -7822,12 +9481,12 @@ expand_block_move (rtx operands[])
&& ! fixed_regs[8])
{
move_bytes = (bytes > 16) ? 16 : bytes;
- gen_func.movstrsi = gen_movstrsi_4reg;
+ gen_func.movmemsi = gen_movmemsi_4reg;
}
else if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
displacements. */
- && (align >= 8 || (! STRICT_ALIGNMENT && align >= 4)))
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
{
move_bytes = 8;
mode = DImode;
@@ -7836,15 +9495,15 @@ expand_block_move (rtx operands[])
else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
{ /* move up to 8 bytes at a time */
move_bytes = (bytes > 8) ? 8 : bytes;
- gen_func.movstrsi = gen_movstrsi_2reg;
+ gen_func.movmemsi = gen_movmemsi_2reg;
}
- else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
{ /* move 4 bytes */
move_bytes = 4;
mode = SImode;
gen_func.mov = gen_movsi;
}
- else if (bytes >= 2 && (align >= 2 || ! STRICT_ALIGNMENT))
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
{ /* move 2 bytes */
move_bytes = 2;
mode = HImode;
@@ -7853,7 +9512,7 @@ expand_block_move (rtx operands[])
else if (TARGET_STRING && bytes > 1)
{ /* move up to 4 bytes at a time */
move_bytes = (bytes > 4) ? 4 : bytes;
- gen_func.movstrsi = gen_movstrsi_1reg;
+ gen_func.movmemsi = gen_movmemsi_1reg;
}
else /* move 1 byte at a time */
{
@@ -7861,14 +9520,14 @@ expand_block_move (rtx operands[])
mode = QImode;
gen_func.mov = gen_movqi;
}
-
+
src = adjust_address (orig_src, mode, offset);
dest = adjust_address (orig_dest, mode, offset);
-
- if (mode != BLKmode)
+
+ if (mode != BLKmode)
{
rtx tmp_reg = gen_reg_rtx (mode);
-
+
emit_insn ((*gen_func.mov) (tmp_reg, src));
stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
}
@@ -7883,7 +9542,7 @@ expand_block_move (rtx operands[])
if (mode == BLKmode)
{
- /* Move the address into scratch registers. The movstrsi
+ /* Move the address into scratch registers. The movmemsi
patterns require zero offset. */
if (!REG_P (XEXP (src, 0)))
{
@@ -7891,15 +9550,15 @@ expand_block_move (rtx operands[])
src = replace_equiv_address (src, src_reg);
}
set_mem_size (src, GEN_INT (move_bytes));
-
+
if (!REG_P (XEXP (dest, 0)))
{
rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
dest = replace_equiv_address (dest, dest_reg);
}
set_mem_size (dest, GEN_INT (move_bytes));
-
- emit_insn ((*gen_func.movstrsi) (dest, src,
+
+ emit_insn ((*gen_func.movmemsi) (dest, src,
GEN_INT (move_bytes & 31),
align_rtx));
}
@@ -7909,88 +9568,6 @@ expand_block_move (rtx operands[])
}
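Besides the movstrsi-to-movmemsi rename, two substantive changes run through expand_block_move: an AltiVec 16-byte case is tried before the string moves, and the alignment comparisons switch units because align is now pre-scaled by BITS_PER_UNIT. The unit change is easy to misread in the hunks above; a trivially checkable restatement, with BITS_PER_UNIT modeled as a local constant:

#include <assert.h>

int
main (void)
{
  const int bits_per_unit = 8;
  assert (8 * bits_per_unit == 64);   /* old align >= 8 for DImode  */
  assert (4 * bits_per_unit == 32);   /* old align >= 4 for SImode  */
  assert (2 * bits_per_unit == 16);   /* old align >= 2 for HImode  */
  return 0;
}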
-/* Return 1 if OP is a load multiple operation. It is known to be a
- PARALLEL and the first section will be tested. */
-
-int
-load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- unsigned int dest_regno;
- rtx src_addr;
- int i;
-
- /* Perform a quick check so we don't blow up below. */
- if (count <= 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
- return 0;
-
- dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
- src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
-
- for (i = 1; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
-
- if (GET_CODE (elt) != SET
- || GET_CODE (SET_DEST (elt)) != REG
- || GET_MODE (SET_DEST (elt)) != SImode
- || REGNO (SET_DEST (elt)) != dest_regno + i
- || GET_CODE (SET_SRC (elt)) != MEM
- || GET_MODE (SET_SRC (elt)) != SImode
- || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
- || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
- || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
- || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
- return 0;
- }
-
- return 1;
-}
-
-/* Similar, but tests for store multiple. Here, the second vector element
- is a CLOBBER. It will be tested later. */
-
-int
-store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0) - 1;
- unsigned int src_regno;
- rtx dest_addr;
- int i;
-
- /* Perform a quick check so we don't blow up below. */
- if (count <= 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
- return 0;
-
- src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
- dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
-
- for (i = 1; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i + 1);
-
- if (GET_CODE (elt) != SET
- || GET_CODE (SET_SRC (elt)) != REG
- || GET_MODE (SET_SRC (elt)) != SImode
- || REGNO (SET_SRC (elt)) != src_regno + i
- || GET_CODE (SET_DEST (elt)) != MEM
- || GET_MODE (SET_DEST (elt)) != SImode
- || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
- || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
- || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
- || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
- return 0;
- }
-
- return 1;
-}
-
/* Return a string to perform a load_multiple operation.
operands[0] is the vector.
operands[1] is the source address.
@@ -8048,405 +9625,43 @@ rs6000_output_load_multiple (rtx operands[3])
return "{lsi|lswi} %2,%1,%N0";
}
-/* Return 1 for a parallel vrsave operation. */
-
-int
-vrsave_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- unsigned int dest_regno, src_regno;
- int i;
-
- if (count <= 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE)
- return 0;
-
- dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
- src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
-
- if (dest_regno != VRSAVE_REGNO
- && src_regno != VRSAVE_REGNO)
- return 0;
-
- for (i = 1; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
-
- if (GET_CODE (elt) != CLOBBER
- && GET_CODE (elt) != SET)
- return 0;
- }
-
- return 1;
-}
-
-/* Return 1 for an PARALLEL suitable for mfcr. */
-
-int
-mfcr_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- int i;
-
- /* Perform a quick check so we don't blow up below. */
- if (count < 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
- || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
- return 0;
-
- for (i = 0; i < count; i++)
- {
- rtx exp = XVECEXP (op, 0, i);
- rtx unspec;
- int maskval;
- rtx src_reg;
-
- src_reg = XVECEXP (SET_SRC (exp), 0, 0);
-
- if (GET_CODE (src_reg) != REG
- || GET_MODE (src_reg) != CCmode
- || ! CR_REGNO_P (REGNO (src_reg)))
- return 0;
-
- if (GET_CODE (exp) != SET
- || GET_CODE (SET_DEST (exp)) != REG
- || GET_MODE (SET_DEST (exp)) != SImode
- || ! INT_REGNO_P (REGNO (SET_DEST (exp))))
- return 0;
- unspec = SET_SRC (exp);
- maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg));
-
- if (GET_CODE (unspec) != UNSPEC
- || XINT (unspec, 1) != UNSPEC_MOVESI_FROM_CR
- || XVECLEN (unspec, 0) != 2
- || XVECEXP (unspec, 0, 0) != src_reg
- || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
- || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
- return 0;
- }
- return 1;
-}
-
-/* Return 1 for an PARALLEL suitable for mtcrf. */
-
-int
-mtcrf_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- int i;
- rtx src_reg;
-
- /* Perform a quick check so we don't blow up below. */
- if (count < 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
- || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
- return 0;
- src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
-
- if (GET_CODE (src_reg) != REG
- || GET_MODE (src_reg) != SImode
- || ! INT_REGNO_P (REGNO (src_reg)))
- return 0;
-
- for (i = 0; i < count; i++)
- {
- rtx exp = XVECEXP (op, 0, i);
- rtx unspec;
- int maskval;
-
- if (GET_CODE (exp) != SET
- || GET_CODE (SET_DEST (exp)) != REG
- || GET_MODE (SET_DEST (exp)) != CCmode
- || ! CR_REGNO_P (REGNO (SET_DEST (exp))))
- return 0;
- unspec = SET_SRC (exp);
- maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
-
- if (GET_CODE (unspec) != UNSPEC
- || XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
- || XVECLEN (unspec, 0) != 2
- || XVECEXP (unspec, 0, 0) != src_reg
- || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
- || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
- return 0;
- }
- return 1;
-}
-
-/* Return 1 for an PARALLEL suitable for lmw. */
-
-int
-lmw_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- unsigned int dest_regno;
- rtx src_addr;
- unsigned int base_regno;
- HOST_WIDE_INT offset;
- int i;
-
- /* Perform a quick check so we don't blow up below. */
- if (count <= 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
- return 0;
-
- dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
- src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
-
- if (dest_regno > 31
- || count != 32 - (int) dest_regno)
- return 0;
-
- if (legitimate_indirect_address_p (src_addr, 0))
- {
- offset = 0;
- base_regno = REGNO (src_addr);
- if (base_regno == 0)
- return 0;
- }
- else if (legitimate_offset_address_p (SImode, src_addr, 0))
- {
- offset = INTVAL (XEXP (src_addr, 1));
- base_regno = REGNO (XEXP (src_addr, 0));
- }
- else
- return 0;
-
- for (i = 0; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
- rtx newaddr;
- rtx addr_reg;
- HOST_WIDE_INT newoffset;
-
- if (GET_CODE (elt) != SET
- || GET_CODE (SET_DEST (elt)) != REG
- || GET_MODE (SET_DEST (elt)) != SImode
- || REGNO (SET_DEST (elt)) != dest_regno + i
- || GET_CODE (SET_SRC (elt)) != MEM
- || GET_MODE (SET_SRC (elt)) != SImode)
- return 0;
- newaddr = XEXP (SET_SRC (elt), 0);
- if (legitimate_indirect_address_p (newaddr, 0))
- {
- newoffset = 0;
- addr_reg = newaddr;
- }
- else if (legitimate_offset_address_p (SImode, newaddr, 0))
- {
- addr_reg = XEXP (newaddr, 0);
- newoffset = INTVAL (XEXP (newaddr, 1));
- }
- else
- return 0;
- if (REGNO (addr_reg) != base_regno
- || newoffset != offset + 4 * i)
- return 0;
- }
-
- return 1;
-}
-
-/* Return 1 for an PARALLEL suitable for stmw. */
-
-int
-stmw_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- int count = XVECLEN (op, 0);
- unsigned int src_regno;
- rtx dest_addr;
- unsigned int base_regno;
- HOST_WIDE_INT offset;
- int i;
-
- /* Perform a quick check so we don't blow up below. */
- if (count <= 1
- || GET_CODE (XVECEXP (op, 0, 0)) != SET
- || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
- || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
- return 0;
-
- src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
- dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
-
- if (src_regno > 31
- || count != 32 - (int) src_regno)
- return 0;
-
- if (legitimate_indirect_address_p (dest_addr, 0))
- {
- offset = 0;
- base_regno = REGNO (dest_addr);
- if (base_regno == 0)
- return 0;
- }
- else if (legitimate_offset_address_p (SImode, dest_addr, 0))
- {
- offset = INTVAL (XEXP (dest_addr, 1));
- base_regno = REGNO (XEXP (dest_addr, 0));
- }
- else
- return 0;
-
- for (i = 0; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
- rtx newaddr;
- rtx addr_reg;
- HOST_WIDE_INT newoffset;
-
- if (GET_CODE (elt) != SET
- || GET_CODE (SET_SRC (elt)) != REG
- || GET_MODE (SET_SRC (elt)) != SImode
- || REGNO (SET_SRC (elt)) != src_regno + i
- || GET_CODE (SET_DEST (elt)) != MEM
- || GET_MODE (SET_DEST (elt)) != SImode)
- return 0;
- newaddr = XEXP (SET_DEST (elt), 0);
- if (legitimate_indirect_address_p (newaddr, 0))
- {
- newoffset = 0;
- addr_reg = newaddr;
- }
- else if (legitimate_offset_address_p (SImode, newaddr, 0))
- {
- addr_reg = XEXP (newaddr, 0);
- newoffset = INTVAL (XEXP (newaddr, 1));
- }
- else
- return 0;
- if (REGNO (addr_reg) != base_regno
- || newoffset != offset + 4 * i)
- return 0;
- }
-
- return 1;
-}
/* A validation routine: say whether CODE, a condition code, and MODE
match. The other alternatives either don't make sense or should
never be generated. */
-static void
+void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
- if (GET_RTX_CLASS (code) != '<'
- || GET_MODE_CLASS (mode) != MODE_CC)
- abort ();
+ gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+ && GET_MODE_CLASS (mode) == MODE_CC);
/* These don't make sense. */
- if ((code == GT || code == LT || code == GE || code == LE)
- && mode == CCUNSmode)
- abort ();
+ gcc_assert ((code != GT && code != LT && code != GE && code != LE)
+ || mode != CCUNSmode);
- if ((code == GTU || code == LTU || code == GEU || code == LEU)
- && mode != CCUNSmode)
- abort ();
+ gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
+ || mode == CCUNSmode);
- if (mode != CCFPmode
- && (code == ORDERED || code == UNORDERED
- || code == UNEQ || code == LTGT
- || code == UNGT || code == UNLT
- || code == UNGE || code == UNLE))
- abort ();
-
- /* These should never be generated except for
+ gcc_assert (mode == CCFPmode
+ || (code != ORDERED && code != UNORDERED
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT
+ && code != UNGE && code != UNLE));
+
+ /* These should never be generated except for
flag_finite_math_only. */
- if (mode == CCFPmode
- && ! flag_finite_math_only
- && (code == LE || code == GE
- || code == UNEQ || code == LTGT
- || code == UNGT || code == UNLT))
- abort ();
+ gcc_assert (mode != CCFPmode
+ || flag_finite_math_only
+ || (code != LE && code != GE
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT));
/* These are invalid; the information is not there. */
- if (mode == CCEQmode
- && code != EQ && code != NE)
- abort ();
-}
-
-/* Return 1 if OP is a comparison operation that is valid for a branch insn.
- We only check the opcode against the mode of the CC value here. */
-
-int
-branch_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- enum rtx_code code = GET_CODE (op);
- enum machine_mode cc_mode;
-
- if (GET_RTX_CLASS (code) != '<')
- return 0;
-
- cc_mode = GET_MODE (XEXP (op, 0));
- if (GET_MODE_CLASS (cc_mode) != MODE_CC)
- return 0;
-
- validate_condition_mode (code, cc_mode);
-
- return 1;
-}
-
-/* Return 1 if OP is a comparison operation that is valid for a branch
- insn and which is true if the corresponding bit in the CC register
- is set. */
-
-int
-branch_positive_comparison_operator (rtx op, enum machine_mode mode)
-{
- enum rtx_code code;
-
- if (! branch_comparison_operator (op, mode))
- return 0;
-
- code = GET_CODE (op);
- return (code == EQ || code == LT || code == GT
- || code == LTU || code == GTU
- || code == UNORDERED);
-}
-
-/* Return 1 if OP is a comparison operation that is valid for an scc
- insn: it must be a positive comparison. */
-
-int
-scc_comparison_operator (rtx op, enum machine_mode mode)
-{
- return branch_positive_comparison_operator (op, mode);
-}
-
-int
-trap_comparison_operator (rtx op, enum machine_mode mode)
-{
- if (mode != VOIDmode && mode != GET_MODE (op))
- return 0;
- return GET_RTX_CLASS (GET_CODE (op)) == '<';
-}
-
-int
-boolean_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- enum rtx_code code = GET_CODE (op);
- return (code == AND || code == IOR || code == XOR);
-}
-
-int
-boolean_or_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- enum rtx_code code = GET_CODE (op);
- return (code == IOR || code == XOR);
+ gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
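validate_condition_mode also loses its static qualifier, presumably so the predicates.md definitions that replaced the C predicate functions above can call it. Its body shows the mechanical abort-to-assert rewrite applied throughout this patch; here is that rewrite on a stand-in condition, with gcc_assert modeled by plain assert for illustration:

#include <assert.h>
#include <stdlib.h>

#define gcc_assert(EXPR) assert (EXPR)

static void
old_style (int ok)
{
  if (!ok)           /* pre-4.2 idiom: negated guard, then abort */
    abort ();
}

static void
new_style (int ok)
{
  gcc_assert (ok);   /* 4.2 idiom: assert the condition that must hold */
}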
-int
-min_max_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- enum rtx_code code = GET_CODE (op);
- return (code == SMIN || code == SMAX || code == UMIN || code == UMAX);
-}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
mask required to convert the result of a rotate insn into a shift
@@ -8637,17 +9852,39 @@ includes_rldicr_lshift_p (rtx shiftop, rtx andop)
return 0;
}
-/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
- for lfq and stfq insns.
+/* Return 1 if the operands will generate valid arguments for the rlwimi
+   instruction for an insert with right shift in 64-bit mode.  The mask may
+   not start on the first bit or stop on the last bit because the
+   wrap-around effects of the instruction do not correspond to the
+   semantics of the RTL insn. */
+
+int
+insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
+{
+ if (INTVAL (startop) > 32
+ && INTVAL (startop) < 64
+ && INTVAL (sizeop) > 1
+ && INTVAL (sizeop) + INTVAL (startop) < 64
+ && INTVAL (shiftop) > 0
+ && INTVAL (sizeop) + INTVAL (shiftop) < 32
+ && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
+ return 1;
+
+ return 0;
+}
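The clauses of the new insvdi_rshift_rlwimi_p are easiest to sanity-check with concrete numbers. The predicate restated on plain integers, plus one satisfying triple; the values are illustrative:

static int
insvdi_rshift_rlwimi_ok (long size, long start, long shift)
{
  return start > 32 && start < 64
         && size > 1 && size + start < 64
         && shift > 0 && size + shift < 32
         && 64 - (shift & 63) >= size;
}

/* insvdi_rshift_rlwimi_ok (8, 40, 16) == 1:
   40 is in (32,64), 8 > 1, 8 + 40 < 64, 16 > 0, 8 + 16 < 32, 64 - 16 >= 8.  */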
- Note reg1 and reg2 *must* be hard registers. To be sure we will
- abort if we are passed pseudo registers. */
+/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
+ for lfq and stfq insns iff the registers are hard registers. */
int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ /* We might have been passed non-floating-point registers. */
+ if (!FP_REGNO_P (REGNO (reg1))
+ || !FP_REGNO_P (REGNO (reg2)))
return 0;
return (REGNO (reg1) == REGNO (reg2) - 1);
@@ -8658,10 +9895,18 @@ registers_ok_for_quad_peep (rtx reg1, rtx reg2)
(addr2 == addr1 + 8). */
int
-addrs_ok_for_quad_peep (rtx addr1, rtx addr2)
+mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
- unsigned int reg1;
- int offset1;
+ rtx addr1, addr2;
+ unsigned int reg1, reg2;
+ int offset1, offset2;
+
+ /* The mems cannot be volatile. */
+ if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
+ return 0;
+
+ addr1 = XEXP (mem1, 0);
+ addr2 = XEXP (mem2, 0);
/* Extract an offset (if used) from the first addr. */
if (GET_CODE (addr1) == PLUS)
@@ -8671,11 +9916,11 @@ addrs_ok_for_quad_peep (rtx addr1, rtx addr2)
return 0;
else
{
- reg1 = REGNO (XEXP (addr1, 0));
+ reg1 = REGNO (XEXP (addr1, 0));
/* The offset must be constant! */
if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
- return 0;
- offset1 = INTVAL (XEXP (addr1, 1));
+ return 0;
+ offset1 = INTVAL (XEXP (addr1, 1));
}
}
else if (GET_CODE (addr1) != REG)
@@ -8687,23 +9932,36 @@ addrs_ok_for_quad_peep (rtx addr1, rtx addr2)
offset1 = 0;
}
- /* Make sure the second address is a (mem (plus (reg) (const_int)))
- or if it is (mem (reg)) then make sure that offset1 is -8 and the same
- register as addr1. */
- if (offset1 == -8 && GET_CODE (addr2) == REG && reg1 == REGNO (addr2))
- return 1;
- if (GET_CODE (addr2) != PLUS)
- return 0;
-
- if (GET_CODE (XEXP (addr2, 0)) != REG
- || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ /* And now for the second addr. */
+ if (GET_CODE (addr2) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr2, 0)) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (XEXP (addr2, 0));
+ /* The offset must be constant. */
+ if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+ offset2 = INTVAL (XEXP (addr2, 1));
+ }
+ }
+ else if (GET_CODE (addr2) != REG)
return 0;
+ else
+ {
+ reg2 = REGNO (addr2);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset2 = 0;
+ }
- if (reg1 != REGNO (XEXP (addr2, 0)))
+ /* Both of these must have the same base register. */
+ if (reg1 != reg2)
return 0;
/* The offset for the second addr must be 8 more than the first addr. */
- if (INTVAL (XEXP (addr2, 1)) != offset1 + 8)
+ if (offset2 != offset1 + 8)
return 0;
/* All the tests passed. addr1 and addr2 are valid for lfq or stfq
@@ -8716,31 +9974,31 @@ addrs_ok_for_quad_peep (rtx addr1, rtx addr2)
NO_REGS is returned. */
enum reg_class
-secondary_reload_class (enum reg_class class,
- enum machine_mode mode,
- rtx in)
+rs6000_secondary_reload_class (enum reg_class class,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx in)
{
int regno;
if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
- && MACHOPIC_INDIRECT
+ && MACHOPIC_INDIRECT
#endif
- ))
+ ))
{
/* We cannot copy a symbolic operand directly into anything
- other than BASE_REGS for TARGET_ELF. So indicate that a
- register from BASE_REGS is needed as an intermediate
- register.
-
+ other than BASE_REGS for TARGET_ELF. So indicate that a
+ register from BASE_REGS is needed as an intermediate
+ register.
+
On Darwin, pic addresses require a load from memory, which
needs a base register. */
if (class != BASE_REGS
- && (GET_CODE (in) == SYMBOL_REF
- || GET_CODE (in) == HIGH
- || GET_CODE (in) == LABEL_REF
- || GET_CODE (in) == CONST))
- return BASE_REGS;
+ && (GET_CODE (in) == SYMBOL_REF
+ || GET_CODE (in) == HIGH
+ || GET_CODE (in) == LABEL_REF
+ || GET_CODE (in) == CONST))
+ return BASE_REGS;
}
if (GET_CODE (in) == REG)
@@ -8788,7 +10046,7 @@ secondary_reload_class (enum reg_class class,
}
/* Given a comparison operation, return the bit number in CCR to test. We
- know this is a valid comparison.
+ know this is a valid comparison.
SCC_P is 1 if this is for an scc. That means that %D will have been
used instead of %C, so the bits will be in different places.
@@ -8804,14 +10062,12 @@ ccr_bit (rtx op, int scc_p)
int base_bit;
rtx reg;
- if (GET_RTX_CLASS (code) != '<')
+ if (!COMPARISON_P (op))
return -1;
reg = XEXP (op, 0);
- if (GET_CODE (reg) != REG
- || ! CR_REGNO_P (REGNO (reg)))
- abort ();
+ gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
cc_mode = GET_MODE (reg);
cc_regnum = REGNO (reg);
@@ -8821,10 +10077,10 @@ ccr_bit (rtx op, int scc_p)
/* When generating a sCOND operation, only positive conditions are
allowed. */
- if (scc_p && code != EQ && code != GT && code != LT && code != UNORDERED
- && code != GTU && code != LTU)
- abort ();
-
+ gcc_assert (!scc_p
+ || code == EQ || code == GT || code == LT || code == UNORDERED
+ || code == GTU || code == LTU);
+
switch (code)
{
case NE:
@@ -8848,13 +10104,13 @@ ccr_bit (rtx op, int scc_p)
return scc_p ? base_bit + 3 : base_bit + 1;
default:
- abort ();
+ gcc_unreachable ();
}
}
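The arithmetic in ccr_bit relies on the PowerPC condition-register layout: eight 4-bit fields with LT, GT, EQ and SO at offsets 0 through 3 of each field. The mapping as a sketch, not patch code:

/* Bit number within the 32-bit CR of a condition in a given field.  */
static int
cr_field_bit (int cr_field /* 0..7 */, int cond /* LT=0 GT=1 EQ=2 SO=3 */)
{
  return 4 * cr_field + cond;   /* e.g. CR6's EQ bit: 4*6 + 2 == 26 */
}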
/* Return the GOT register. */
-struct rtx_def *
+rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
/* The second flow pass currently (June 1999) can't update
@@ -8896,8 +10152,7 @@ extract_MB (rtx op)
from the left. */
if ((val & 0x80000000) == 0)
{
- if ((val & 0xffffffff) == 0)
- abort ();
+ gcc_assert (val & 0xffffffff);
i = 1;
while (((val <<= 1) & 0x80000000) == 0)
@@ -8929,8 +10184,7 @@ extract_ME (rtx op)
the right. */
if ((val & 1) == 0)
{
- if ((val & 0xffffffff) == 0)
- abort ();
+ gcc_assert (val & 0xffffffff);
i = 30;
while (((val >>= 1) & 1) == 0)
@@ -8970,7 +10224,7 @@ rs6000_get_some_local_dynamic_name (void)
rs6000_get_some_local_dynamic_name_1, 0))
return cfun->machine->some_ld_name;
- abort ();
+ gcc_unreachable ();
}
/* Helper function for rs6000_get_some_local_dynamic_name. */
@@ -8993,6 +10247,36 @@ rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
return 0;
}
+/* Write out a function code label. */
+
+void
+rs6000_output_function_entry (FILE *file, const char *fname)
+{
+ if (fname[0] != '.')
+ {
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ if (DOT_SYMBOLS)
+ putc ('.', file);
+ else
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
+ break;
+
+ case ABI_V4:
+ case ABI_DARWIN:
+ break;
+ }
+ }
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+}
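rs6000_output_function_entry factors the ABI-dependent entry-label logic out of print_operand's 'z' case so both call sites share it. A hedged summary of what it emits for a name such as "foo" when the name does not already start with '.':

/* ABI_AIX, DOT_SYMBOLS   ->  ".foo"  (dot prepended)
   ABI_AIX, !DOT_SYMBOLS  ->  a local label built from the "L." prefix
   ABI_V4 or ABI_DARWIN   ->  "foo"   (name emitted unchanged)
   Under TARGET_AIX the name then goes through RS6000_OUTPUT_BASENAME,
   which handles XCOFF name decoration; otherwise through assemble_name.  */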
+
/* Print an operand. Recognize special options, documented below. */
#if TARGET_ELF
@@ -9043,7 +10327,7 @@ print_operand (FILE *file, rtx x, int code)
case 'B':
/* If the low-order bit is zero, write 'r'; otherwise, write 'l'
for 64-bit mask direction. */
- putc (((INT_LOWPART(x) & 1) == 0 ? 'r' : 'l'), file);
+ putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
return;
/* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
@@ -9052,21 +10336,20 @@ print_operand (FILE *file, rtx x, int code)
case 'c':
/* X is a CR register. Print the number of the GT bit of the CR. */
if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
- output_operand_lossage ("invalid %%E value");
+ output_operand_lossage ("invalid %%c value");
else
- fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
return;
case 'D':
- /* Like 'J' but get to the EQ bit. */
- if (GET_CODE (x) != REG)
- abort ();
+ /* Like 'J' but get to the GT bit only. */
+ gcc_assert (GET_CODE (x) == REG);
- /* Bit 1 is EQ bit. */
- i = 4 * (REGNO (x) - CR0_REGNO) + 2;
+ /* Bit 1 is GT bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 1;
- /* If we want bit 31, write a shift count of zero, not 32. */
- fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ /* Add one for shift count in rlinm for scc. */
+ fprintf (file, "%d", i + 1);
return;
case 'E':
@@ -9192,7 +10475,7 @@ print_operand (FILE *file, rtx x, int code)
/* Write second word of DImode or DFmode reference. Works on register
or non-indexed memory only. */
if (GET_CODE (x) == REG)
- fprintf (file, "%s", reg_names[REGNO (x) + 1]);
+ fputs (reg_names[REGNO (x) + 1], file);
else if (GET_CODE (x) == MEM)
{
/* Handle possible auto-increment. Since it is pre-increment and
@@ -9211,7 +10494,7 @@ print_operand (FILE *file, rtx x, int code)
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'm':
/* MB value for a mask operand. */
if (! mask_operand (x, SImode))
@@ -9263,15 +10546,15 @@ print_operand (FILE *file, rtx x, int code)
|| REGNO (XEXP (x, 0)) >= 32)
output_operand_lossage ("invalid %%P value");
else
- fprintf (file, "%s", reg_names[REGNO (XEXP (x, 0))]);
+ fputs (reg_names[REGNO (XEXP (x, 0))], file);
return;
case 'q':
/* This outputs the logical code corresponding to a boolean
expression. The expression may have one or both operands
negated (if one, only the first one). For condition register
- logical operations, it will also treat the negated
- CR codes as NOTs, but not handle NOTs of them. */
+ logical operations, it will also treat the negated
+ CR codes as NOTs, but not handle NOTs of them. */
{
const char *const *t = 0;
const char *s;
@@ -9299,14 +10582,14 @@ print_operand (FILE *file, rtx x, int code)
else
s = t[1];
}
-
+
fputs (s, file);
}
return;
case 'Q':
if (TARGET_MFCRF)
- fputc (',',file);
+ fputc (',', file);
/* FALLTHRU */
else
return;
@@ -9353,15 +10636,13 @@ print_operand (FILE *file, rtx x, int code)
}
while (uval != 0)
--i, uval >>= 1;
- if (i < 0)
- abort ();
+ gcc_assert (i >= 0);
fprintf (file, "%d", i);
return;
case 't':
/* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
- if (GET_CODE (x) != REG || GET_MODE (x) != CCmode)
- abort ();
+ gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
/* Bit 3 is OV bit. */
i = 4 * (REGNO (x) - CR0_REGNO) + 3;
@@ -9386,7 +10667,7 @@ print_operand (FILE *file, rtx x, int code)
if (! INT_P (x))
output_operand_lossage ("invalid %%u value");
else
- fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
(INT_LOWPART (x) >> 16) & 0xffff);
return;
@@ -9442,7 +10723,7 @@ print_operand (FILE *file, rtx x, int code)
fputs ("lge", file); /* 5 */
break;
default:
- abort ();
+ gcc_unreachable ();
}
break;
@@ -9450,7 +10731,7 @@ print_operand (FILE *file, rtx x, int code)
/* If constant, low-order 16 bits of constant, signed. Otherwise, write
normally. */
if (INT_P (x))
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
else
print_operand (file, x, 0);
@@ -9475,9 +10756,8 @@ print_operand (FILE *file, rtx x, int code)
{
val = CONST_DOUBLE_LOW (x);
- if (val == 0)
- abort ();
- else if (val < 0)
+ gcc_assert (val);
+ if (val < 0)
--i;
else
for ( ; i < 64; i++)
@@ -9498,7 +10778,7 @@ print_operand (FILE *file, rtx x, int code)
case 'Y':
/* Like 'L', for third word of TImode */
if (GET_CODE (x) == REG)
- fprintf (file, "%s", reg_names[REGNO (x) + 2]);
+ fputs (reg_names[REGNO (x) + 2], file);
else if (GET_CODE (x) == MEM)
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
@@ -9511,42 +10791,41 @@ print_operand (FILE *file, rtx x, int code)
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'z':
/* X is a SYMBOL_REF. Write out the name preceded by a
period and without any trailing data in brackets. Used for function
names. If we are configured for System V (or the embedded ABI) on
the PowerPC, do not emit the period, since those systems do not use
TOCs and the like. */
- if (GET_CODE (x) != SYMBOL_REF)
- abort ();
-
- if (XSTR (x, 0)[0] != '.')
- {
- switch (DEFAULT_ABI)
- {
- default:
- abort ();
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
- case ABI_AIX:
- putc ('.', file);
- break;
+ /* Mark the decl as referenced so that cgraph will output the
+ function. */
+ if (SYMBOL_REF_DECL (x))
+ mark_decl_referenced (SYMBOL_REF_DECL (x));
- case ABI_V4:
- case ABI_DARWIN:
- break;
- }
+ /* For macho, check to see if we need a stub. */
+ if (TARGET_MACHO)
+ {
+ const char *name = XSTR (x, 0);
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
+ name = machopic_indirection_name (x, /*stub_p=*/true);
+#endif
+ assemble_name (file, name);
}
- if (TARGET_AIX)
- RS6000_OUTPUT_BASENAME (file, XSTR (x, 0));
- else
+ else if (!DOT_SYMBOLS)
assemble_name (file, XSTR (x, 0));
+ else
+ rs6000_output_function_entry (file, XSTR (x, 0));
return;
case 'Z':
/* Like 'L', for last word of TImode. */
if (GET_CODE (x) == REG)
- fprintf (file, "%s", reg_names[REGNO (x) + 3]);
+ fputs (reg_names[REGNO (x) + 3], file);
else if (GET_CODE (x) == MEM)
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
@@ -9565,12 +10844,12 @@ print_operand (FILE *file, rtx x, int code)
{
rtx tmp;
- if (GET_CODE (x) != MEM)
- abort ();
+ gcc_assert (GET_CODE (x) == MEM);
tmp = XEXP (x, 0);
- if (TARGET_E500)
+ /* Ugly hack because %y is overloaded. */
+ if (TARGET_E500 && GET_MODE_SIZE (GET_MODE (x)) == 8)
{
/* Handle [reg]. */
if (GET_CODE (tmp) == REG)
@@ -9584,8 +10863,7 @@ print_operand (FILE *file, rtx x, int code)
{
int x;
- if (GET_CODE (XEXP (tmp, 0)) != REG)
- abort ();
+ gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
x = INTVAL (XEXP (tmp, 1));
fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
@@ -9594,10 +10872,19 @@ print_operand (FILE *file, rtx x, int code)
/* Fall through. Must be [reg+reg]. */
}
+ if (TARGET_ALTIVEC
+ && GET_CODE (tmp) == AND
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT
+ && INTVAL (XEXP (tmp, 1)) == -16)
+ tmp = XEXP (tmp, 0);
if (GET_CODE (tmp) == REG)
fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
- else if (GET_CODE (tmp) == PLUS && GET_CODE (XEXP (tmp, 1)) == REG)
+ else
{
+ gcc_assert (GET_CODE (tmp) == PLUS
+ && REG_P (XEXP (tmp, 0))
+ && REG_P (XEXP (tmp, 1)));
+
if (REGNO (XEXP (tmp, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
reg_names[ REGNO (XEXP (tmp, 0)) ]);
@@ -9605,11 +10892,9 @@ print_operand (FILE *file, rtx x, int code)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
reg_names[ REGNO (XEXP (tmp, 1)) ]);
}
- else
- abort ();
break;
}
-
+
case 0:
if (GET_CODE (x) == REG)
fprintf (file, "%s", reg_names[REGNO (x)]);
@@ -9653,11 +10938,12 @@ print_operand_address (FILE *file, rtx x)
if (small_data_operand (x, GET_MODE (x)))
fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
reg_names[SMALL_DATA_REG]);
- else if (TARGET_TOC)
- abort ();
+ else
+ gcc_assert (!TARGET_TOC);
}
else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
{
+ gcc_assert (REG_P (XEXP (x, 0)));
if (REGNO (XEXP (x, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
reg_names[ REGNO (XEXP (x, 0)) ]);
@@ -9670,7 +10956,7 @@ print_operand_address (FILE *file, rtx x)
INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_ELF
else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
- && CONSTANT_P (XEXP (x, 1)))
+ && CONSTANT_P (XEXP (x, 1)))
{
output_addr_const (file, XEXP (x, 1));
fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
@@ -9678,7 +10964,7 @@ print_operand_address (FILE *file, rtx x)
#endif
#if TARGET_MACHO
else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
- && CONSTANT_P (XEXP (x, 1)))
+ && CONSTANT_P (XEXP (x, 1)))
{
fprintf (file, "lo16(");
output_addr_const (file, XEXP (x, 1));
@@ -9692,7 +10978,7 @@ print_operand_address (FILE *file, rtx x)
rtx contains_minus = XEXP (x, 1);
rtx minus, symref;
const char *name;
-
+
/* Find the (minus (sym) (toc)) buried in X, and temporarily
turn it into (sym) for output_addr_const. */
while (GET_CODE (XEXP (contains_minus, 0)) != MINUS)
@@ -9722,7 +11008,7 @@ print_operand_address (FILE *file, rtx x)
fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
}
else
- abort ();
+ gcc_unreachable ();
}
/* Target hook for assembling integer objects. The PowerPC version has
@@ -9737,14 +11023,14 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
/* Special handling for SI values. */
if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
{
- extern int in_toc_section (void);
static int recurse = 0;
-
+
/* For -mrelocatable, we mark all addresses that need to be fixed up
in the .fixup section. */
if (TARGET_RELOCATABLE
- && !in_toc_section ()
- && !in_text_section ()
+ && in_section != toc_section
+ && in_section != text_section
+ && !unlikely_text_section_p (in_section)
&& !recurse
&& GET_CODE (x) != CONST_INT
&& GET_CODE (x) != CONST_DOUBLE
@@ -9795,10 +11081,12 @@ rs6000_assemble_visibility (tree decl, int vis)
{
/* Functions need to have their entry point symbol visibility set as
well as their descriptor symbol visibility. */
- if (DEFAULT_ABI == ABI_AIX && TREE_CODE (decl) == FUNCTION_DECL)
+ if (DEFAULT_ABI == ABI_AIX
+ && DOT_SYMBOLS
+ && TREE_CODE (decl) == FUNCTION_DECL)
{
static const char * const visibility_types[] = {
- NULL, "internal", "hidden", "protected"
+ NULL, "internal", "hidden", "protected"
};
const char *name, *type;
@@ -9820,7 +11108,7 @@ rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
/* Reversal of FP compares takes care -- an ordered compare
becomes an unordered compare and vice versa. */
- if (mode == CCFPmode
+ if (mode == CCFPmode
&& (!flag_finite_math_only
|| code == UNLT || code == UNLE || code == UNGT || code == UNGE
|| code == UNEQ || code == LTGT))
@@ -9841,7 +11129,17 @@ rs6000_generate_compare (enum rtx_code code)
if (rs6000_compare_fp_p)
comp_mode = CCFPmode;
else if (code == GTU || code == LTU
- || code == GEU || code == LEU)
+ || code == GEU || code == LEU)
+ comp_mode = CCUNSmode;
+ else if ((code == EQ || code == NE)
+ && GET_CODE (rs6000_compare_op0) == SUBREG
+ && GET_CODE (rs6000_compare_op1) == SUBREG
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op0)
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op1))
+	/* These are unsigned values; perhaps there will be a later
+	   ordering compare that can be shared with this one.
+	   Unfortunately we cannot detect the signedness of the operands
+	   for non-subregs.  */
comp_mode = CCUNSmode;
else
comp_mode = CCmode;
@@ -9849,40 +11147,94 @@ rs6000_generate_compare (enum rtx_code code)
/* First, the compare. */
compare_result = gen_reg_rtx (comp_mode);
- /* SPE FP compare instructions on the GPRs. Yuck! */
+ /* E500 FP compare instructions on the GPRs. Yuck! */
if ((TARGET_E500 && !TARGET_FPRS && TARGET_HARD_FLOAT)
&& rs6000_compare_fp_p)
{
- rtx cmp, or1, or2, or_result, compare_result2;
+ rtx cmp, or_result, compare_result2;
+ enum machine_mode op_mode = GET_MODE (rs6000_compare_op0);
+
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
- /* Note: The E500 comparison instructions set the GT bit (x +
- 1), on success. This explains the mess. */
+ /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
+ This explains the following mess. */
switch (code)
{
- case EQ: case UNEQ: case NE: case LTGT:
- cmp = flag_finite_math_only
- ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1)
- : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1);
+ case EQ: case UNEQ: case NE: case LTGT:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
break;
- case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
- cmp = flag_finite_math_only
- ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1)
- : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1);
+
+ case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
break;
- case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
- cmp = flag_finite_math_only
- ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1)
- : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
- rs6000_compare_op1);
+
+ case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
break;
- default:
- abort ();
+ default:
+ gcc_unreachable ();
}
/* Synthesize LE and GE from LT/GT || EQ. */
@@ -9896,30 +11248,39 @@ rs6000_generate_compare (enum rtx_code code)
case GE: code = GT; break;
case LEU: code = LT; break;
case GEU: code = GT; break;
- default: abort ();
+ default: gcc_unreachable ();
}
- or1 = gen_reg_rtx (SImode);
- or2 = gen_reg_rtx (SImode);
- or_result = gen_reg_rtx (CCEQmode);
compare_result2 = gen_reg_rtx (CCFPmode);
/* Do the EQ. */
- cmp = flag_finite_math_only
- ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0,
- rs6000_compare_op1)
- : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0,
- rs6000_compare_op1);
- emit_insn (cmp);
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
- or1 = gen_rtx_GT (SImode, compare_result, const0_rtx);
- or2 = gen_rtx_GT (SImode, compare_result2, const0_rtx);
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ emit_insn (cmp);
/* OR them together. */
- cmp = gen_rtx_SET (VOIDmode, or_result,
- gen_rtx_COMPARE (CCEQmode,
- gen_rtx_IOR (SImode, or1, or2),
- const_true_rtx));
+ or_result = gen_reg_rtx (CCFPmode);
+ cmp = gen_e500_cr_ior_compare (or_result, compare_result,
+ compare_result2);
compare_result = or_result;
code = EQ;
}
@@ -9927,8 +11288,8 @@ rs6000_generate_compare (enum rtx_code code)
{
if (code == NE || code == LTGT)
code = NE;
- else
- code = EQ;
+ else
+ code = EQ;
}
emit_insn (cmp);
@@ -9936,13 +11297,13 @@ rs6000_generate_compare (enum rtx_code code)
else
{
/* Generate XLC-compatible TFmode compare as PARALLEL with extra
- CLOBBERs to match cmptf_internal2 pattern. */
+ CLOBBERs to match cmptf_internal2 pattern. */
if (comp_mode == CCFPmode && TARGET_XL_COMPAT
- && GET_MODE (rs6000_compare_op0) == TFmode
- && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
- && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
- emit_insn (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (9,
+ && GET_MODE (rs6000_compare_op0) == TFmode
+ && !TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (9,
gen_rtx_SET (VOIDmode,
compare_result,
gen_rtx_COMPARE (comp_mode,
@@ -9956,18 +11317,31 @@ rs6000_generate_compare (enum rtx_code code)
gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
+ else if (GET_CODE (rs6000_compare_op1) == UNSPEC
+ && XINT (rs6000_compare_op1, 1) == UNSPEC_SP_TEST)
+ {
+ rtx op1 = XVECEXP (rs6000_compare_op1, 0, 0);
+ comp_mode = CCEQmode;
+ compare_result = gen_reg_rtx (CCEQmode);
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_testdi (compare_result,
+ rs6000_compare_op0, op1));
+ else
+ emit_insn (gen_stack_protect_testsi (compare_result,
+ rs6000_compare_op0, op1));
+ }
else
emit_insn (gen_rtx_SET (VOIDmode, compare_result,
gen_rtx_COMPARE (comp_mode,
- rs6000_compare_op0,
+ rs6000_compare_op0,
rs6000_compare_op1)));
}
-
+
/* Some kinds of FP comparisons need an OR operation;
under flag_finite_math_only we don't bother. */
if (rs6000_compare_fp_p
- && ! flag_finite_math_only
- && ! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)
+ && !flag_finite_math_only
+ && !(TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)
&& (code == LE || code == GE
|| code == UNEQ || code == LTGT
|| code == UNGT || code == UNLT))
@@ -9975,7 +11349,7 @@ rs6000_generate_compare (enum rtx_code code)
enum rtx_code or1, or2;
rtx or1_rtx, or2_rtx, compare2_rtx;
rtx or_result = gen_reg_rtx (CCEQmode);
-
+
switch (code)
{
case LE: or1 = LT; or2 = EQ; break;
@@ -9984,12 +11358,12 @@ rs6000_generate_compare (enum rtx_code code)
case LTGT: or1 = LT; or2 = GT; break;
case UNGT: or1 = UNORDERED; or2 = GT; break;
case UNLT: or1 = UNORDERED; or2 = LT; break;
- default: abort ();
+ default: gcc_unreachable ();
}
validate_condition_mode (or1, comp_mode);
validate_condition_mode (or2, comp_mode);
- or1_rtx = gen_rtx (or1, SImode, compare_result, const0_rtx);
- or2_rtx = gen_rtx (or2, SImode, compare_result, const0_rtx);
+ or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
+ or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
compare2_rtx = gen_rtx_COMPARE (CCEQmode,
gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
const_true_rtx);
@@ -10000,8 +11374,8 @@ rs6000_generate_compare (enum rtx_code code)
}
validate_condition_mode (code, GET_MODE (compare_result));
-
- return gen_rtx (code, VOIDmode, compare_result, const0_rtx);
+
+ return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
@@ -10025,13 +11399,12 @@ rs6000_emit_sCOND (enum rtx_code code, rtx result)
PUT_MODE (condition_rtx, SImode);
t = XEXP (condition_rtx, 0);
- if (cond_code != NE && cond_code != EQ)
- abort ();
+ gcc_assert (cond_code == NE || cond_code == EQ);
if (cond_code == NE)
- emit_insn (gen_e500_flip_eq_bit (t, t));
+ emit_insn (gen_e500_flip_gt_bit (t, t));
- emit_insn (gen_move_from_CR_eq_bit (result, t));
+ emit_insn (gen_move_from_CR_gt_bit (result, t));
return;
}
@@ -10043,11 +11416,11 @@ rs6000_emit_sCOND (enum rtx_code code, rtx result)
rtx not_result = gen_reg_rtx (CCEQmode);
rtx not_op, rev_cond_rtx;
enum machine_mode cc_mode;
-
+
cc_mode = GET_MODE (XEXP (condition_rtx, 0));
- rev_cond_rtx = gen_rtx (rs6000_reverse_condition (cc_mode, cond_code),
- SImode, XEXP (condition_rtx, 0), const0_rtx);
+ rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
+ SImode, XEXP (condition_rtx, 0), const0_rtx);
not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
@@ -10085,7 +11458,7 @@ rs6000_emit_cbranch (enum rtx_code code, rtx loc)
/* Return the string to output a conditional branch to LABEL, which is
the operand number of the label, or -1 if the branch is really a
- conditional return.
+ conditional return.
OP is the conditional expression. XEXP (OP, 0) is assumed to be a
condition code register and its mode specifies what kind of
@@ -10129,13 +11502,20 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
/* The efscmp/tst* instructions twiddle bit 2, which maps nicely
to the GT bit. */
- if (code == EQ)
- /* Opposite of GT. */
- code = GT;
- else if (code == NE)
- code = UNLE;
- else
- abort ();
+ switch (code)
+ {
+ case EQ:
+ /* Opposite of GT. */
+ code = GT;
+ break;
+
+ case NE:
+ code = UNLE;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
}
switch (code)
@@ -10146,23 +11526,23 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
ccode = "ne"; break;
case EQ: case UNEQ:
ccode = "eq"; break;
- case GE: case GEU:
+ case GE: case GEU:
ccode = "ge"; break;
- case GT: case GTU: case UNGT:
+ case GT: case GTU: case UNGT:
ccode = "gt"; break;
- case LE: case LEU:
+ case LE: case LEU:
ccode = "le"; break;
- case LT: case LTU: case UNLT:
+ case LT: case LTU: case UNLT:
ccode = "lt"; break;
case UNORDERED: ccode = "un"; break;
case ORDERED: ccode = "nu"; break;
case UNGE: ccode = "nl"; break;
case UNLE: ccode = "ng"; break;
default:
- abort ();
+ gcc_unreachable ();
}
-
- /* Maybe we have a guess as to how likely the branch is.
+
+ /* Maybe we have a guess as to how likely the branch is.
The old mnemonics don't have a way to specify this information. */
pred = "";
note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
@@ -10176,13 +11556,14 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
prediction. For older cpus we may as well always hint, but
assume not taken for branches that are very close to 50% as a
mispredicted taken branch is more expensive than a
- mispredicted not-taken branch. */
+ mispredicted not-taken branch. */
if (rs6000_always_hint
- || abs (prob) > REG_BR_PROB_BASE / 100 * 48)
+ || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
+ && br_prob_note_reliable_p (note)))
{
if (abs (prob) > REG_BR_PROB_BASE / 20
&& ((prob > 0) ^ need_longbranch))
- pred = "+";
+ pred = "+";
else
pred = "-";
}
@@ -10212,25 +11593,292 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
return string;
}
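
The hinting rules above compress a lot of policy into two conditions.  As a
rough sketch (illustrative only; the function name is invented and the
br_prob_note_reliable_p test is omitted), the decision reduces to:

    #include <stdlib.h>

    /* Model of the hint selection in output_cbranch.  BASE stands in
       for REG_BR_PROB_BASE (10000 in GCC).  */
    static const char *
    hint_model (int prob, int need_longbranch, int always_hint)
    {
      const int BASE = 10000;

      if (!always_hint && abs (prob) <= BASE / 100 * 48)
        return "";                  /* too close to 50%: no hint */
      if (abs (prob) > BASE / 20 && ((prob > 0) ^ need_longbranch))
        return "+";                 /* predict taken */
      return "-";                   /* predict not taken */
    }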
-/* Return the string to flip the EQ bit on a CR. */
+/* Return the string to flip the GT bit on a CR. */
char *
-output_e500_flip_eq_bit (rtx dst, rtx src)
+output_e500_flip_gt_bit (rtx dst, rtx src)
{
static char string[64];
int a, b;
- if (GET_CODE (dst) != REG || ! CR_REGNO_P (REGNO (dst))
- || GET_CODE (src) != REG || ! CR_REGNO_P (REGNO (src)))
- abort ();
+ gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
+ && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
- /* EQ bit. */
- a = 4 * (REGNO (dst) - CR0_REGNO) + 2;
- b = 4 * (REGNO (src) - CR0_REGNO) + 2;
+ /* GT bit. */
+ a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
+ b = 4 * (REGNO (src) - CR0_REGNO) + 1;
sprintf (string, "crnot %d,%d", a, b);
return string;
}
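
The bit arithmetic above follows from the condition register layout; a
minimal sketch, assuming the usual LT/GT/EQ/SO ordering within each 4-bit CR
field (the helper name is invented):

    /* The GT bit of CR field N has bit number 4*N + 1, which is what
       output_e500_flip_gt_bit computes for both DST and SRC.  */
    static int
    cr_gt_bit (int cr_field)        /* cr_field = REGNO - CR0_REGNO */
    {
      return 4 * cr_field + 1;      /* e.g. CR6 -> bit 25 */
    }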
+/* Return the insn index of the vector compare instruction for the given
+   CODE, DEST_MODE and OP_MODE.  Return INSN_NOT_AVAILABLE if no valid
+   insn is available. */
+
+static int
+get_vec_cmp_insn (enum rtx_code code,
+ enum machine_mode dest_mode,
+ enum machine_mode op_mode)
+{
+ if (!TARGET_ALTIVEC)
+ return INSN_NOT_AVAILABLE;
+
+ switch (code)
+ {
+ case EQ:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPEQUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPEQUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPEQUW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPEQFP;
+ break;
+ case GE:
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGEFP;
+ case GT:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTSB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTSH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTSW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGTFP;
+ break;
+ case GTU:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTUW;
+ break;
+ default:
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
+
+/* Emit a vector compare for operands OP0 and OP1 using code RCODE.
+   DMODE is the expected destination mode.  This is a recursive function. */
+
+static rtx
+rs6000_emit_vector_compare (enum rtx_code rcode,
+ rtx op0, rtx op1,
+ enum machine_mode dmode)
+{
+ int vec_cmp_insn;
+ rtx mask;
+ enum machine_mode dest_mode;
+ enum machine_mode op_mode = GET_MODE (op1);
+
+ gcc_assert (TARGET_ALTIVEC);
+ gcc_assert (GET_MODE (op0) == GET_MODE (op1));
+
+  /* Floating-point vector compare instructions use a V4SImode destination.
+     Move the result to the appropriate mode later. */
+ if (dmode == V4SFmode)
+ dest_mode = V4SImode;
+ else
+ dest_mode = dmode;
+
+ mask = gen_reg_rtx (dest_mode);
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+
+ if (vec_cmp_insn == INSN_NOT_AVAILABLE)
+ {
+ bool swap_operands = false;
+ bool try_again = false;
+ switch (rcode)
+ {
+ case LT:
+ rcode = GT;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case LTU:
+ rcode = GTU;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case NE:
+ /* Treat A != B as ~(A==B). */
+ {
+ enum insn_code nor_code;
+ rtx eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
+ dest_mode);
+
+ nor_code = one_cmpl_optab->handlers[(int)dest_mode].insn_code;
+ gcc_assert (nor_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
+
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ case GE:
+ case GEU:
+ case LE:
+ case LEU:
+ /* Try GT/GTU/LT/LTU OR EQ */
+ {
+ rtx c_rtx, eq_rtx;
+ enum insn_code ior_code;
+ enum rtx_code new_code;
+
+ switch (rcode)
+ {
+ case GE:
+ new_code = GT;
+ break;
+
+ case GEU:
+ new_code = GTU;
+ break;
+
+ case LE:
+ new_code = LT;
+ break;
+
+ case LEU:
+ new_code = LTU;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ c_rtx = rs6000_emit_vector_compare (new_code,
+ op0, op1, dest_mode);
+ eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
+ dest_mode);
+
+ ior_code = ior_optab->handlers[(int)dest_mode].insn_code;
+ gcc_assert (ior_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (try_again)
+ {
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+ /* You only get two chances. */
+ gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
+ }
+
+ if (swap_operands)
+ {
+ rtx tmp;
+ tmp = op0;
+ op0 = op1;
+ op1 = tmp;
+ }
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, mask,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (2, op0, op1),
+ vec_cmp_insn)));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+}
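
The recursion above leans on two identities: NE is the complement of EQ, and
each of GE/GEU/LE/LEU is the corresponding strict compare IORed with EQ.  A
scalar sketch (names invented; AltiVec compares yield all-ones per true
element):

    static unsigned
    ne_mask (int a, int b)
    {
      unsigned eq = (a == b) ? ~0u : 0u;
      return ~eq;                   /* NE = one's complement of EQ */
    }

    static unsigned
    ge_mask (int a, int b)
    {
      unsigned gt = (a > b) ? ~0u : 0u;
      unsigned eq = (a == b) ? ~0u : 0u;
      return gt | eq;               /* GE = GT IOR EQ */
    }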
+
+/* Return the vector select instruction for MODE.  Return
+   INSN_NOT_AVAILABLE if no valid insn exists for the given mode. */
+
+static int
+get_vsel_insn (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V4SImode:
+ return UNSPEC_VSEL4SI;
+ break;
+ case V4SFmode:
+ return UNSPEC_VSEL4SF;
+ break;
+ case V8HImode:
+ return UNSPEC_VSEL8HI;
+ break;
+ case V16QImode:
+ return UNSPEC_VSEL16QI;
+ break;
+ default:
+ return INSN_NOT_AVAILABLE;
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
+
+/* Emit vector select insn where DEST is destination using
+ operands OP1, OP2 and MASK. */
+
+static void
+rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
+{
+ rtx t, temp;
+ enum machine_mode dest_mode = GET_MODE (dest);
+ int vsel_insn_index = get_vsel_insn (GET_MODE (dest));
+
+ temp = gen_reg_rtx (dest_mode);
+
+  /* For each vector element, select op1 when the mask is 1, otherwise
+     select op2. */
+ t = gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (3, op2, op1, mask),
+ vsel_insn_index));
+ emit_insn (t);
+ emit_move_insn (dest, temp);
+ return;
+}
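
The operand order in the UNSPEC, (op2, op1, mask), lines up with vsel
semantics: where a mask bit is set the result comes from op1, elsewhere from
op2.  A bitwise sketch (function name invented):

    static unsigned
    vsel_model (unsigned op1, unsigned op2, unsigned mask)
    {
      /* Per-bit select: mask bit 1 -> op1, mask bit 0 -> op2.  */
      return (op1 & mask) | (op2 & ~mask);
    }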
+
+/* Emit a vector conditional expression.
+   DEST is the destination.  OP1 and OP2 are the two VEC_COND_EXPR operands.
+   CC_OP0 and CC_OP1 are the two operands of the relational operation COND. */
+
+int
+rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
+ rtx cond, rtx cc_op0, rtx cc_op1)
+{
+ enum machine_mode dest_mode = GET_MODE (dest);
+ enum rtx_code rcode = GET_CODE (cond);
+ rtx mask;
+
+ if (!TARGET_ALTIVEC)
+ return 0;
+
+  /* Get the vector mask for the given relational operation. */
+ mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
+
+ rs6000_emit_vector_select (dest, op1, op2, mask);
+
+ return 1;
+}
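
End to end, the expansion computes dest = (cc_op0 RCODE cc_op1) ? op1 : op2
element-wise; a scalar sketch for RCODE == GT (names invented):

    static void
    vcond_model (int *dest, const int *op1, const int *op2,
                 const int *a, const int *b, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        {
          unsigned mask = (a[i] > b[i]) ? ~0u : 0u;  /* the compare */
          dest[i] = (int) (((unsigned) op1[i] & mask)
                           | ((unsigned) op2[i] & ~mask));
        }
    }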
+
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
operands of the last comparison is nonzero/true, FALSE_COND if it
is zero/false. Return 0 if the hardware has no such operation. */
@@ -10245,6 +11893,7 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
enum machine_mode compare_mode = GET_MODE (op0);
enum machine_mode result_mode = GET_MODE (dest);
rtx temp;
+ bool is_against_zero;
/* These modes should always match. */
if (GET_MODE (op1) != compare_mode
@@ -10266,7 +11915,18 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
return 0;
}
else if (TARGET_E500 && TARGET_HARD_FLOAT && !TARGET_FPRS
- && GET_MODE_CLASS (compare_mode) == MODE_FLOAT)
+ && SCALAR_FLOAT_MODE_P (compare_mode))
+ return 0;
+
+ is_against_zero = op1 == CONST0_RTX (compare_mode);
+
+ /* A floating-point subtract might overflow, underflow, or produce
+ an inexact result, thus changing the floating-point flags, so it
+ can't be generated if we care about that. It's safe if one side
+ of the construct is zero, since then no subtract will be
+ generated. */
+ if (SCALAR_FLOAT_MODE_P (compare_mode)
+ && flag_trapping_math && ! is_against_zero)
return 0;
/* Eliminate half of the comparisons by switching operands, this
@@ -10284,10 +11944,10 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
it'll probably be faster to use a branch here too. */
if (code == UNEQ && HONOR_NANS (compare_mode))
return 0;
-
+
if (GET_CODE (op1) == CONST_DOUBLE)
REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
-
+
/* We're going to try to implement comparisons by performing
a subtract, then comparing against zero. Unfortunately,
Inf - Inf is NaN which is not zero, and so if we don't
@@ -10298,17 +11958,21 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
&& (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
/* Constructs of the form (a OP b ? a : b) are safe. */
&& ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
- || (! rtx_equal_p (op0, true_cond)
+ || (! rtx_equal_p (op0, true_cond)
&& ! rtx_equal_p (op1, true_cond))))
return 0;
+
/* At this point we know we can use fsel. */
/* Reduce the comparison to a comparison against zero. */
- temp = gen_reg_rtx (compare_mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_MINUS (compare_mode, op0, op1)));
- op0 = temp;
- op1 = CONST0_RTX (compare_mode);
+ if (! is_against_zero)
+ {
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_MINUS (compare_mode, op0, op1)));
+ op0 = temp;
+ op1 = CONST0_RTX (compare_mode);
+ }
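
fsel computes d = (a >= 0.0) ? b : c, so a general op0/op1 comparison is
first rewritten as a subtract tested against zero.  A sketch of the
semantics this code targets (assuming A is not a NaN; name invented):

    static double
    fsel_model (double a, double b, double c)
    {
      /* op0 >= op1 becomes fsel on (op0 - op1) >= 0; this is why
         Inf - Inf (a NaN) and trapping-math subtracts were screened
         out earlier.  */
      return a >= 0.0 ? b : c;
    }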
/* If we don't care about NaNs we can reduce some of the comparisons
down to faster ones. */
@@ -10351,7 +12015,7 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
case EQ:
temp = gen_reg_rtx (compare_mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp,
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
gen_rtx_NEG (compare_mode,
gen_rtx_ABS (compare_mode, op0))));
op0 = temp;
@@ -10377,7 +12041,7 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
/* a GT 0 <-> (a GE 0 && -a UNLT 0) */
temp = gen_reg_rtx (result_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
@@ -10390,7 +12054,7 @@ rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
break;
default:
- abort ();
+ gcc_unreachable ();
}
emit_insn (gen_rtx_SET (VOIDmode, dest,
@@ -10456,18 +12120,454 @@ rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
c = GEU;
if (code == SMAX || code == UMAX)
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op0, op1, mode, 0);
else
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op1, op0, mode, 0);
- if (target == NULL_RTX)
- abort ();
+ gcc_assert (target);
if (target != dest)
emit_move_insn (dest, target);
}
-/* Emit instructions to move SRC to DST. Called by splitters for
+/* Emit instructions to perform a load-reserved/store-conditional operation.
+ The operation performed is an atomic
+ (set M (CODE:MODE M OP))
+ If not NULL, BEFORE is atomically set to M before the operation, and
+ AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
+ If SYNC_P then a memory barrier is emitted before the operation.
+ Either OP or M may be wrapped in a NOT operation. */
+
+void
+rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
+ rtx m, rtx op, rtx before_param, rtx after_param,
+ bool sync_p)
+{
+ enum machine_mode used_mode;
+ rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
+ rtx used_m;
+ rtvec vec;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+ rtx shift = NULL_RTX;
+
+ if (sync_p)
+ emit_insn (gen_memory_barrier ());
+
+ if (GET_CODE (m) == NOT)
+ used_m = XEXP (m, 0);
+ else
+ used_m = m;
+
+ /* If this is smaller than SImode, we'll have to use SImode with
+ adjustments. */
+ if (mode == QImode || mode == HImode)
+ {
+ rtx newop, oldop;
+
+ if (MEM_ALIGN (used_m) >= 32)
+ {
+ int ishift = 0;
+ if (BYTES_BIG_ENDIAN)
+ ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
+
+ shift = GEN_INT (ishift);
+ }
+ else
+ {
+ rtx addrSI, aligned_addr;
+ int shift_mask = mode == QImode ? 0x18 : 0x10;
+
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode,
+ XEXP (used_m, 0)));
+ shift = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
+
+ aligned_addr = expand_binop (Pmode, and_optab,
+ XEXP (used_m, 0),
+ GEN_INT (-4), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ used_m = change_address (used_m, SImode, aligned_addr);
+ set_mem_align (used_m, 32);
+ /* It's safe to keep the old alias set of USED_M, because
+ the operation is atomic and only affects the original
+ USED_M. */
+ if (GET_CODE (m) == NOT)
+ m = gen_rtx_NOT (SImode, used_m);
+ else
+ m = used_m;
+ }
+
+ if (GET_CODE (op) == NOT)
+ {
+ oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
+ oldop = gen_rtx_NOT (SImode, oldop);
+ }
+ else
+ oldop = lowpart_subreg (SImode, op, mode);
+
+ switch (code)
+ {
+ case IOR:
+ case XOR:
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+ break;
+
+ case AND:
+ newop = expand_binop (SImode, ior_optab,
+ oldop, GEN_INT (~imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_rotlsi3 (newop, newop, shift));
+ break;
+
+ case PLUS:
+ case MINUS:
+ {
+ rtx mask;
+
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ if (code == PLUS)
+ newop = gen_rtx_PLUS (SImode, m, newop);
+ else
+ newop = gen_rtx_MINUS (SImode, m, newop);
+ newop = gen_rtx_AND (SImode, newop, mask);
+ newop = gen_rtx_IOR (SImode, newop,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask),
+ m));
+ break;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (GET_CODE (m) == NOT)
+ {
+ rtx mask, xorm;
+
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ xorm = gen_rtx_XOR (SImode, used_m, mask);
+	  /* Depending on the value of 'op', the XOR or the operation may
+	     be simplified away.  */
+ newop = simplify_gen_binary (code, SImode, xorm, newop);
+ }
+ op = newop;
+ used_mode = SImode;
+ before = gen_reg_rtx (used_mode);
+ after = gen_reg_rtx (used_mode);
+ }
+ else
+ {
+ used_mode = mode;
+ before = before_param;
+ after = after_param;
+
+ if (before == NULL_RTX)
+ before = gen_reg_rtx (used_mode);
+ if (after == NULL_RTX)
+ after = gen_reg_rtx (used_mode);
+ }
+
+ if ((code == PLUS || code == MINUS || GET_CODE (m) == NOT)
+ && used_mode != mode)
+ the_op = op; /* Computed above. */
+ else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
+ the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
+ else
+ the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
+
+ set_after = gen_rtx_SET (VOIDmode, after, the_op);
+ set_before = gen_rtx_SET (VOIDmode, before, used_m);
+ set_atomic = gen_rtx_SET (VOIDmode, used_m,
+ gen_rtx_UNSPEC (used_mode,
+ gen_rtvec (1, the_op),
+ UNSPEC_SYNC_OP));
+ cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
+
+ if ((code == PLUS || code == MINUS) && used_mode != mode)
+ vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ else
+ vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+ /* Shift and mask the return values properly. */
+ if (used_mode != mode && before_param)
+ {
+ emit_insn (gen_lshrsi3 (before, before, shift));
+ convert_move (before_param, before, 1);
+ }
+
+ if (used_mode != mode && after_param)
+ {
+ emit_insn (gen_lshrsi3 (after, after, shift));
+ convert_move (after_param, after, 1);
+ }
+
+ /* The previous sequence will end with a branch that's dependent on
+ the conditional store, so placing an isync will ensure that no
+ other instructions (especially, no load or store instructions)
+ can start before the atomic operation completes. */
+ if (sync_p)
+ emit_insn (gen_isync ());
+}
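
For QImode and HImode the function positions the subword inside its aligned
SImode word using the rlwinm/xor pair above; a scalar sketch of that
arithmetic, assuming big-endian layout with shift_mask 0x18 for QImode and
0x10 for HImode (helper name invented):

    static unsigned
    subword_shift (unsigned addr, unsigned shift_mask)
    {
      unsigned shift = (addr << 3) & shift_mask; /* byte offset in bits */
      return shift ^ shift_mask;                 /* big-endian flip */
    }
    /* A QImode operand at addr % 4 == 0 lands at shift 24 (the most
       significant byte); at addr % 4 == 3 it lands at shift 0.  */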
+
+/* A subroutine of the atomic operation splitters. Jump to LABEL if
+ COND is true. Mark the jump as unlikely to be taken. */
+
+static void
+emit_unlikely_jump (rtx cond, rtx label)
+{
+ rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
+ rtx x;
+
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+}
+
+/* A subroutine of the atomic operation splitters. Emit a load-locked
+ instruction in MODE. */
+
+static void
+emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
+{
+ rtx (*fn) (rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_load_locked_si;
+ else if (mode == DImode)
+ fn = gen_load_locked_di;
+ emit_insn (fn (reg, mem));
+}
+
+/* A subroutine of the atomic operation splitters. Emit a store-conditional
+ instruction in MODE. */
+
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+ rtx (*fn) (rtx, rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_store_conditional_si;
+ else if (mode == DImode)
+ fn = gen_store_conditional_di;
+
+ /* Emit sync before stwcx. to address PPC405 Erratum. */
+ if (PPC405_ERRATUM77)
+ emit_insn (gen_memory_barrier ());
+
+ emit_insn (fn (res, mem, val));
+}
+
+/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
+   to perform.  MEM is the memory on which to operate.  VAL is the second
+   operand of the binary operator.  BEFORE and AFTER are optional locations to
+   return the value of MEM either before or after the operation.  SCRATCH is
+   a scratch register. */
+
+void
+rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
+ rtx before, rtx after, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_label_rtx ();
+ emit_label (label);
+ label = gen_rtx_LABEL_REF (VOIDmode, label);
+
+ if (before == NULL_RTX)
+ before = scratch;
+ emit_load_locked (mode, before, mem);
+
+ if (code == NOT)
+ x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
+ else if (code == AND)
+ x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
+ else
+ x = gen_rtx_fmt_ee (code, mode, before, val);
+
+ if (after != NULL_RTX)
+ emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
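
The splitter produces the classic load-reserved/store-conditional retry
loop, roughly: sync; 1: lwarx; op; stwcx.; bne- 1b; isync.  As a semantic
model only (C11 atomics postdate this code and the names are invented):

    #include <stdatomic.h>

    static int
    atomic_op_model (atomic_int *mem, int val, int *before, int *after)
    {
      /* Models code == PLUS: fetch-and-add with full barriers.  */
      int old = atomic_fetch_add_explicit (mem, val, memory_order_seq_cst);
      if (before)
        *before = old;              /* the BEFORE operand */
      if (after)
        *after = old + val;         /* the AFTER operand */
      return old;
    }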
+
+/* Expand an atomic compare and swap operation. MEM is the memory on which
+ to operate. OLDVAL is the old value to be compared. NEWVAL is the new
+ value to be stored. SCRATCH is a scratch GPR. */
+
+void
+rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
+ rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (mode, retval, mem);
+
+ x = gen_rtx_COMPARE (CCmode, retval, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ emit_move_insn (scratch, newval);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
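
The compare-and-swap splitter uses the same loop with an early exit when the
loaded value does not match; a C11 analogue of the semantics (model only,
names invented):

    #include <stdatomic.h>

    static int
    cas_model (atomic_int *mem, int oldval, int newval, int *retval)
    {
      int expected = oldval;
      int ok = atomic_compare_exchange_strong (mem, &expected, newval);
      *retval = expected;   /* the loaded value, like RETVAL above */
      return ok;
    }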
+
+/* Expand an atomic test and set operation. MEM is the memory on which
+ to operate. VAL is the value set. SCRATCH is a scratch GPR. */
+
+void
+rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label, 0));
+
+ emit_load_locked (mode, retval, mem);
+ emit_move_insn (scratch, val);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
+
+void
+rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addrSI, align, wdst, shift, mask;
+ HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+
+ /* Shift amount for subword relative to aligned word. */
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode, XEXP (mem, 0)));
+ shift = gen_reg_rtx (SImode);
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
+
+ /* Shift and mask old value into position within word. */
+ oldval = convert_modes (SImode, mode, oldval, 1);
+ oldval = expand_binop (SImode, and_optab,
+ oldval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (oldval, oldval, shift));
+
+ /* Shift and mask new value into position within word. */
+ newval = convert_modes (SImode, mode, newval, 1);
+ newval = expand_binop (SImode, and_optab,
+ newval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newval, newval, shift));
+
+ /* Mask for insertion. */
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ /* Address of aligned word containing subword. */
+ align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ mem = change_address (mem, SImode, align);
+ set_mem_align (mem, 32);
+ MEM_VOLATILE_P (mem) = 1;
+
+ wdst = gen_reg_rtx (SImode);
+ emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
+ oldval, newval, mem));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
+ rtx oldval, rtx newval, rtx mem,
+ rtx scratch)
+{
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (SImode, scratch, mem);
+
+  /* Mask the subword within the loaded value for comparison with oldval.
+     Use UNSPEC_AND to avoid a clobber. */
+ emit_insn (gen_rtx_SET (SImode, dest,
+ gen_rtx_UNSPEC (SImode,
+ gen_rtvec (2, scratch, mask),
+ UNSPEC_AND)));
+
+ x = gen_rtx_COMPARE (CCmode, dest, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ /* Clear subword within loaded value for insertion of new value. */
+ emit_insn (gen_rtx_SET (SImode, scratch,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask), scratch)));
+ emit_insn (gen_iorsi3 (scratch, scratch, newval));
+ emit_store_conditional (SImode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
+
+
+ /* Emit instructions to move SRC to DST. Called by splitters for
multi-register moves. It will emit at most one instruction for
each register that is accessed; that is, it won't emit li/lis pairs
(or equivalent for 64-bit code). One of SRC or DST must be a hard
@@ -10488,25 +12588,26 @@ rs6000_split_multireg_move (rtx dst, rtx src)
reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
mode = GET_MODE (dst);
- nregs = HARD_REGNO_NREGS (reg, mode);
+ nregs = hard_regno_nregs[reg][mode];
if (FP_REGNO_P (reg))
reg_mode = DFmode;
else if (ALTIVEC_REGNO_P (reg))
reg_mode = V16QImode;
+ else if (TARGET_E500_DOUBLE && mode == TFmode)
+ reg_mode = DFmode;
else
reg_mode = word_mode;
reg_mode_size = GET_MODE_SIZE (reg_mode);
-
- if (reg_mode_size * nregs != GET_MODE_SIZE (mode))
- abort ();
-
+
+ gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
+
if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
{
/* Move register range backwards, if we might have destructive
overlap. */
int i;
for (i = nregs - 1; i >= 0; i--)
- emit_insn (gen_rtx_SET (VOIDmode,
+ emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
i * reg_mode_size),
simplify_gen_subreg (reg_mode, src, mode,
@@ -10518,31 +12619,29 @@ rs6000_split_multireg_move (rtx dst, rtx src)
int j = -1;
bool used_update = false;
- if (GET_CODE (src) == MEM && INT_REGNO_P (reg))
- {
- rtx breg;
+ if (MEM_P (src) && INT_REGNO_P (reg))
+ {
+ rtx breg;
if (GET_CODE (XEXP (src, 0)) == PRE_INC
|| GET_CODE (XEXP (src, 0)) == PRE_DEC)
{
rtx delta_rtx;
breg = XEXP (XEXP (src, 0), 0);
- delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (src)));
+ delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
- src = gen_rtx_MEM (mode, breg);
+ src = replace_equiv_address (src, breg);
}
- else if (! offsettable_memref_p (src))
+ else if (! rs6000_offsettable_memref_p (src))
{
- rtx newsrc, basereg;
+ rtx basereg;
basereg = gen_rtx_REG (Pmode, reg);
emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
- newsrc = gen_rtx_MEM (GET_MODE (src), basereg);
- MEM_COPY_ATTRIBUTES (newsrc, src);
- src = newsrc;
+ src = replace_equiv_address (src, basereg);
}
breg = XEXP (src, 0);
@@ -10566,9 +12665,9 @@ rs6000_split_multireg_move (rtx dst, rtx src)
{
rtx delta_rtx;
breg = XEXP (XEXP (dst, 0), 0);
- delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst)));
+ delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
/* We have to update the breg before doing the store.
Use store with update, if available. */
@@ -10577,32 +12676,34 @@ rs6000_split_multireg_move (rtx dst, rtx src)
{
rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
emit_insn (TARGET_32BIT
- ? gen_movsi_update (breg, breg, delta_rtx, nsrc)
- : gen_movdi_update (breg, breg, delta_rtx, nsrc));
+ ? (TARGET_POWERPC64
+ ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
+ : gen_movsi_update (breg, breg, delta_rtx, nsrc))
+ : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
used_update = true;
}
else
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
- dst = gen_rtx_MEM (mode, breg);
+ dst = replace_equiv_address (dst, breg);
}
- else if (! offsettable_memref_p (dst))
- abort ();
+ else
+ gcc_assert (rs6000_offsettable_memref_p (dst));
}
for (i = 0; i < nregs; i++)
- {
+ {
/* Calculate index to next subword. */
++j;
- if (j == nregs)
+ if (j == nregs)
j = 0;
- /* If compiler already emitted move of first word by
+ /* If compiler already emitted move of first word by
store with update, no need to do anything. */
if (j == 0 && used_update)
continue;
-
+
emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
j * reg_mode_size),
@@ -10626,7 +12727,7 @@ first_reg_to_save (void)
/* Find lowest numbered live register. */
for (first_reg = 13; first_reg <= 31; first_reg++)
- if (regs_ever_live[first_reg]
+ if (regs_ever_live[first_reg]
&& (! call_used_regs[first_reg]
|| (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
@@ -10670,6 +12771,13 @@ first_altivec_reg_to_save (void)
if (! TARGET_ALTIVEC_ABI)
return LAST_ALTIVEC_REGNO + 1;
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && current_function_calls_eh_return
+ && ! TARGET_ALTIVEC)
+ return FIRST_ALTIVEC_REGNO + 20;
+
/* Find lowest numbered live register. */
for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
if (regs_ever_live[i])
@@ -10687,6 +12795,13 @@ compute_vrsave_mask (void)
{
unsigned int i, mask = 0;
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ call-saved altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && current_function_calls_eh_return
+ && ! TARGET_ALTIVEC)
+ mask |= 0xFFF;
+
/* First, find out if we use _any_ altivec registers. */
for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
if (regs_ever_live[i])
@@ -10714,6 +12829,59 @@ compute_vrsave_mask (void)
return mask;
}
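
The 0xFFF constant ORed in for the Darwin EH case covers exactly the
call-saved vector registers.  A sketch of the mapping, assuming the
ALTIVEC_REG_BIT convention that V0 takes the most significant bit (helper
name invented):

    static unsigned
    vrsave_bits (int first_vr, int last_vr)
    {
      unsigned mask = 0;
      int r;
      for (r = first_vr; r <= last_vr; r++)
        mask |= 0x80000000u >> r;   /* V0 -> bit 31, V31 -> bit 0 */
      return mask;                  /* vrsave_bits (20, 31) == 0xFFF */
    }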
+/* For a very restricted set of circumstances, we can cut down the
+ size of prologues/epilogues by calling our own save/restore-the-world
+ routines. */
+
+static void
+compute_save_world_info (rs6000_stack_t *info_ptr)
+{
+ info_ptr->world_save_p = 1;
+ info_ptr->world_save_p
+ = (WORLD_SAVE_P (info_ptr)
+ && DEFAULT_ABI == ABI_DARWIN
+ && ! (current_function_calls_setjmp && flag_exceptions)
+ && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
+ && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
+ && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
+ && info_ptr->cr_save_p);
+
+ /* This will not work in conjunction with sibcalls. Make sure there
+ are none. (This check is expensive, but seldom executed.) */
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ rtx insn;
+ for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
+ if ( GET_CODE (insn) == CALL_INSN
+ && SIBLING_CALL_P (insn))
+ {
+ info_ptr->world_save_p = 0;
+ break;
+ }
+ }
+
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ /* Even if we're not touching VRsave, make sure there's room on the
+ stack for it, if it looks like we're calling SAVE_WORLD, which
+ will attempt to save it. */
+ info_ptr->vrsave_size = 4;
+
+ /* "Save" the VRsave register too if we're saving the world. */
+ if (info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+
+ /* Because the Darwin register save/restore routines only handle
+ F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
+ check. */
+ gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
+ && (info_ptr->first_altivec_reg_save
+ >= FIRST_SAVED_ALTIVEC_REGNO));
+ }
+ return;
+}
+
+
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
@@ -10777,11 +12945,11 @@ is_altivec_return_reg (rtx reg, void *xyes)
| Parameter save area (P) | 8
+---------------------------------------+
| Alloca space (A) | 8+P
- +---------------------------------------+
+ +---------------------------------------+
| Varargs save area (V) | 8+P+A
- +---------------------------------------+
+ +---------------------------------------+
| Local variable space (L) | 8+P+A+V
- +---------------------------------------+
+ +---------------------------------------+
| Float/int conversion temporary (X) | 8+P+A+V+L
+---------------------------------------+
| Save area for AltiVec registers (W) | 8+P+A+V+L+X
@@ -10790,14 +12958,14 @@ is_altivec_return_reg (rtx reg, void *xyes)
+---------------------------------------+
| Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
+---------------------------------------+
- | SPE: area for 64-bit GP registers |
- +---------------------------------------+
- | SPE alignment padding |
- +---------------------------------------+
+ | SPE: area for 64-bit GP registers |
+ +---------------------------------------+
+ | SPE alignment padding |
+ +---------------------------------------+
| saved CR (C) | 8+P+A+V+L+X+W+Y+Z
- +---------------------------------------+
+ +---------------------------------------+
| Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
- +---------------------------------------+
+ +---------------------------------------+
| Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+---------------------------------------+
old SP->| back chain to caller's caller |
@@ -10823,24 +12991,22 @@ is_altivec_return_reg (rtx reg, void *xyes)
static rs6000_stack_t *
rs6000_stack_info (void)
{
- static rs6000_stack_t info, zero_info;
+ static rs6000_stack_t info;
rs6000_stack_t *info_ptr = &info;
int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
int save_align;
HOST_WIDE_INT non_fixed_size;
- /* Zero all fields portably. */
- info = zero_info;
+ memset (&info, 0, sizeof (info));
if (TARGET_SPE)
{
/* Cache value so we don't rescan instruction chain over and over. */
if (cfun->machine->insn_chain_scanned_p == 0)
- {
- cfun->machine->insn_chain_scanned_p = 1;
- info_ptr->spe_64bit_regs_used = (int) spe_func_has_64bit_regs_p ();
- }
+ cfun->machine->insn_chain_scanned_p
+ = spe_func_has_64bit_regs_p () + 1;
+ info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
}
/* Select which calling sequence. */
@@ -10848,7 +13014,7 @@ rs6000_stack_info (void)
/* Calculate which registers need to be saved & save area size. */
info_ptr->first_gp_reg_save = first_reg_to_save ();
- /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
even if it currently looks like we won't. */
if (((TARGET_TOC && TARGET_MINIMAL_TOC)
|| (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
@@ -10885,10 +13051,9 @@ rs6000_stack_info (void)
|| cfun->machine->ra_needs_full_frame);
/* Determine if we need to save the link register. */
- if (rs6000_ra_ever_killed ()
- || (DEFAULT_ABI == ABI_AIX
- && current_function_profile
- && !TARGET_PROFILE_KERNEL)
+ if ((DEFAULT_ABI == ABI_AIX
+ && current_function_profile
+ && !TARGET_PROFILE_KERNEL)
#ifdef TARGET_RELOCATABLE
|| (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
@@ -10896,17 +13061,15 @@ rs6000_stack_info (void)
&& !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
|| info_ptr->first_altivec_reg_save <= LAST_ALTIVEC_REGNO
|| (DEFAULT_ABI == ABI_V4 && current_function_calls_alloca)
- || (DEFAULT_ABI == ABI_DARWIN
- && flag_pic
- && current_function_uses_pic_offset_table)
- || info_ptr->calls_p)
+ || info_ptr->calls_p
+ || rs6000_ra_ever_killed ())
{
info_ptr->lr_save_p = 1;
regs_ever_live[LINK_REGISTER_REGNUM] = 1;
}
/* Determine if we need to save the condition code registers. */
- if (regs_ever_live[CR2_REGNO]
+ if (regs_ever_live[CR2_REGNO]
|| regs_ever_live[CR3_REGNO]
|| regs_ever_live[CR4_REGNO])
{
@@ -10935,10 +13098,16 @@ rs6000_stack_info (void)
/* Determine various sizes. */
info_ptr->reg_size = reg_size;
info_ptr->fixed_size = RS6000_SAVE_AREA;
- info_ptr->varargs_size = RS6000_VARARGS_AREA;
info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
info_ptr->parm_size = RS6000_ALIGN (current_function_outgoing_args_size,
TARGET_ALTIVEC ? 16 : 8);
+ if (FRAME_GROWS_DOWNWARD)
+ info_ptr->vars_size
+ += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT)
+ - (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size);
if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
info_ptr->spe_gp_size = 8 * (32 - info_ptr->first_gp_reg_save);
@@ -10955,12 +13124,14 @@ rs6000_stack_info (void)
else
info_ptr->vrsave_size = 0;
+ compute_save_world_info (info_ptr);
+
/* Calculate the offsets. */
switch (DEFAULT_ABI)
{
case ABI_NONE:
default:
- abort ();
+ gcc_unreachable ();
case ABI_AIX:
case ABI_DARWIN:
@@ -10972,10 +13143,11 @@ rs6000_stack_info (void)
info_ptr->vrsave_save_offset
= info_ptr->gp_save_offset - info_ptr->vrsave_size;
- /* Align stack so vector save area is on a quadword boundary. */
+ /* Align stack so vector save area is on a quadword boundary.
+ The padding goes above the vectors. */
if (info_ptr->altivec_size != 0)
info_ptr->altivec_padding_size
- = 16 - (-info_ptr->vrsave_save_offset % 16);
+ = info_ptr->vrsave_save_offset & 0xF;
else
info_ptr->altivec_padding_size = 0;
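
The old and new padding expressions agree except when the (negative,
two's-complement) offset is already 16-byte aligned, where the old formula
padded a full 16 bytes for nothing.  A quick check:

    #include <stdio.h>

    int
    main (void)
    {
      int off;
      for (off = -32; off <= -20; off += 4)
        printf ("off=%3d  old=%2d  new=%2d\n",
                off, 16 - (-off % 16), off & 0xF);
      /* off=-32: old=16 new=0; off=-28: old=4 new=4; off=-24: old=8
         new=8; off=-20: old=12 new=12.  Only the aligned case differs.  */
      return 0;
    }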
@@ -10983,6 +13155,8 @@ rs6000_stack_info (void)
= info_ptr->vrsave_save_offset
- info_ptr->altivec_padding_size
- info_ptr->altivec_size;
+ gcc_assert (info_ptr->altivec_size == 0
+ || info_ptr->altivec_save_offset % 16 == 0);
/* Adjust for AltiVec case. */
info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
@@ -10999,24 +13173,23 @@ rs6000_stack_info (void)
info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
- {
- /* Align stack so SPE GPR save area is aligned on a
- double-word boundary. */
- if (info_ptr->spe_gp_size != 0)
- info_ptr->spe_padding_size
- = 8 - (-info_ptr->cr_save_offset % 8);
- else
- info_ptr->spe_padding_size = 0;
-
- info_ptr->spe_gp_save_offset
- = info_ptr->cr_save_offset
- - info_ptr->spe_padding_size
- - info_ptr->spe_gp_size;
-
- /* Adjust for SPE case. */
- info_ptr->toc_save_offset
- = info_ptr->spe_gp_save_offset - info_ptr->toc_size;
- }
+ {
+ /* Align stack so SPE GPR save area is aligned on a
+ double-word boundary. */
+ if (info_ptr->spe_gp_size != 0)
+ info_ptr->spe_padding_size
+ = 8 - (-info_ptr->cr_save_offset % 8);
+ else
+ info_ptr->spe_padding_size = 0;
+
+ info_ptr->spe_gp_save_offset
+ = info_ptr->cr_save_offset
+ - info_ptr->spe_padding_size
+ - info_ptr->spe_gp_size;
+
+ /* Adjust for SPE case. */
+ info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
+ }
else if (TARGET_ALTIVEC_ABI)
{
info_ptr->vrsave_save_offset
@@ -11035,12 +13208,11 @@ rs6000_stack_info (void)
- info_ptr->altivec_size;
/* Adjust for AltiVec case. */
- info_ptr->toc_save_offset
- = info_ptr->altivec_save_offset - info_ptr->toc_size;
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
}
else
- info_ptr->toc_save_offset = info_ptr->cr_save_offset - info_ptr->toc_size;
- info_ptr->ehrd_offset = info_ptr->toc_save_offset - ehrd_size;
+ info_ptr->ehrd_offset = info_ptr->cr_save_offset;
+ info_ptr->ehrd_offset -= ehrd_size;
info_ptr->lr_save_offset = reg_size;
break;
}
@@ -11054,15 +13226,12 @@ rs6000_stack_info (void)
+ info_ptr->spe_padding_size
+ ehrd_size
+ info_ptr->cr_size
- + info_ptr->lr_size
- + info_ptr->vrsave_size
- + info_ptr->toc_size,
+ + info_ptr->vrsave_size,
save_align);
non_fixed_size = (info_ptr->vars_size
+ info_ptr->parm_size
- + info_ptr->save_size
- + info_ptr->varargs_size);
+ + info_ptr->save_size);
info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
ABI_STACK_BOUNDARY / BITS_PER_UNIT);
@@ -11118,9 +13287,6 @@ rs6000_stack_info (void)
if (! info_ptr->cr_save_p)
info_ptr->cr_save_offset = 0;
- if (! info_ptr->toc_save_p)
- info_ptr->toc_save_offset = 0;
-
return info_ptr;
}
@@ -11147,10 +13313,23 @@ spe_func_has_64bit_regs_p (void)
{
rtx i;
+ /* FIXME: This should be implemented with attributes...
+
+ (set_attr "spe64" "true")....then,
+ if (get_spe64(insn)) return true;
+
+ It's the only reliable way to do the stuff below. */
+
i = PATTERN (insn);
- if (GET_CODE (i) == SET
- && SPE_VECTOR_MODE (GET_MODE (SET_SRC (i))))
- return true;
+ if (GET_CODE (i) == SET)
+ {
+ enum machine_mode mode = GET_MODE (SET_SRC (i));
+
+ if (SPE_VECTOR_MODE (mode))
+ return true;
+ if (TARGET_E500_DOUBLE && mode == DFmode)
+ return true;
+ }
}
}
@@ -11203,9 +13382,6 @@ debug_stack_info (rs6000_stack_t *info)
if (info->cr_save_p)
fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
- if (info->toc_save_p)
- fprintf (stderr, "\ttoc_save_p = %5d\n", info->toc_save_p);
-
if (info->vrsave_mask)
fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
@@ -11239,9 +13415,6 @@ debug_stack_info (rs6000_stack_t *info)
if (info->cr_save_offset)
fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
- if (info->toc_save_offset)
- fprintf (stderr, "\ttoc_save_offset = %5d\n", info->toc_save_offset);
-
if (info->varargs_save_offset)
fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
@@ -11249,9 +13422,6 @@ debug_stack_info (rs6000_stack_t *info)
fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
info->total_size);
- if (info->varargs_size)
- fprintf (stderr, "\tvarargs_size = %5d\n", info->varargs_size);
-
if (info->vars_size)
fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
info->vars_size);
@@ -11285,15 +13455,9 @@ debug_stack_info (rs6000_stack_t *info)
fprintf (stderr, "\tspe_padding_size = %5d\n",
info->spe_padding_size);
- if (info->lr_size)
- fprintf (stderr, "\tlr_size = %5d\n", info->lr_size);
-
if (info->cr_size)
fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
- if (info->toc_size)
- fprintf (stderr, "\ttoc_size = %5d\n", info->toc_size);
-
if (info->save_size)
fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
@@ -11342,14 +13506,14 @@ rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
if (decl)
{
if (TARGET_ALTIVEC_VRSAVE)
- {
+ {
for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
type; type = TREE_CHAIN (type))
{
if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
return false;
}
- }
+ }
if (DEFAULT_ABI == ABI_DARWIN
|| (*targetm.binds_local_p) (decl))
{
@@ -11363,6 +13527,24 @@ rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
return false;
}
+/* Return NULL if INSN is valid within a low-overhead loop.
+   Otherwise return a string explaining why doloop cannot be applied.
+   PowerPC uses the COUNT register for branch-on-table instructions. */
+
+static const char *
+rs6000_invalid_within_doloop (rtx insn)
+{
+ if (CALL_P (insn))
+ return "Function call in the loop.";
+
+ if (JUMP_P (insn)
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC))
+ return "Computed branch in the loop.";
+
+ return NULL;
+}
+
static int
rs6000_ra_ever_killed (void)
{
@@ -11388,7 +13570,7 @@ rs6000_ra_ever_killed (void)
When we're called from the epilogue, we need to avoid counting
this as a store. */
-
+
push_topmost_sequence ();
top = get_insns ();
pop_topmost_sequence ();
@@ -11398,10 +13580,12 @@ rs6000_ra_ever_killed (void)
{
if (INSN_P (insn))
{
- if (FIND_REG_INC_NOTE (insn, reg))
- return 1;
- else if (GET_CODE (insn) == CALL_INSN
- && !SIBLING_CALL_P (insn))
+ if (CALL_P (insn))
+ {
+ if (!SIBLING_CALL_P (insn))
+ return 1;
+ }
+ else if (find_regno_note (insn, REG_INC, LINK_REGISTER_REGNUM))
return 1;
else if (set_of (reg, insn) != NULL_RTX
&& !prologue_epilogue_contains (insn))
@@ -11430,15 +13614,49 @@ rs6000_emit_load_toc_table (int fromprolog)
rtx dest, insn;
dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
- if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
+ {
+ char buf[30];
+ rtx lab, tmp1, tmp2, got, tempLR;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ if (flag_pic == 2)
+ got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+ else
+ got = rs6000_got_sym ();
+ tmp1 = tmp2 = dest;
+ if (!fromprolog)
+ {
+ tmp1 = gen_reg_rtx (Pmode);
+ tmp2 = gen_reg_rtx (Pmode);
+ }
+ tempLR = (fromprolog
+ ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+ : gen_reg_rtx (Pmode));
+ insn = emit_insn (gen_load_toc_v4_PIC_1 (tempLR, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_move_insn (tmp1, tempLR);
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+ else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
{
- rtx temp = (fromprolog
- ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
- : gen_reg_rtx (Pmode));
- insn = emit_insn (gen_load_toc_v4_pic_si (temp));
+ rtx tempLR = (fromprolog
+ ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+ : gen_reg_rtx (Pmode));
+
+ insn = emit_insn (gen_load_toc_v4_pic_si (tempLR));
if (fromprolog)
rs6000_maybe_dead (insn);
- insn = emit_move_insn (dest, temp);
+ insn = emit_move_insn (dest, tempLR);
if (fromprolog)
rs6000_maybe_dead (insn);
}
@@ -11452,13 +13670,6 @@ rs6000_emit_load_toc_table (int fromprolog)
? gen_rtx_REG (Pmode, 0)
: gen_reg_rtx (Pmode));
- /* possibly create the toc section */
- if (! toc_initialized)
- {
- toc_section ();
- function_section (current_function_decl);
- }
-
if (fromprolog)
{
rtx symF, symL;
@@ -11504,8 +13715,10 @@ rs6000_emit_load_toc_table (int fromprolog)
if (fromprolog)
rs6000_maybe_dead (insn);
}
- else if (DEFAULT_ABI == ABI_AIX)
+ else
{
+ gcc_assert (DEFAULT_ABI == ABI_AIX);
+
if (TARGET_32BIT)
insn = emit_insn (gen_load_toc_aix_si (dest));
else
@@ -11513,8 +13726,6 @@ rs6000_emit_load_toc_table (int fromprolog)
if (fromprolog)
rs6000_maybe_dead (insn);
}
- else
- abort ();
}
/* Emit instructions to restore the link register after determining where
@@ -11539,14 +13750,15 @@ rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
|| current_function_calls_alloca
|| info->total_size > 32767)
{
- emit_move_insn (operands[1], gen_rtx_MEM (Pmode, frame_rtx));
+ tmp = gen_frame_mem (Pmode, frame_rtx);
+ emit_move_insn (operands[1], tmp);
frame_rtx = operands[1];
}
else if (info->push_p)
sp_offset = info->total_size;
tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
- tmp = gen_rtx_MEM (Pmode, tmp);
+ tmp = gen_frame_mem (Pmode, tmp);
emit_move_insn (tmp, operands[0]);
}
else
@@ -11555,20 +13767,20 @@ rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
static GTY(()) int set = -1;
-int
+int
get_TOC_alias_set (void)
{
if (set == -1)
set = new_alias_set ();
return set;
-}
+}
/* This returns nonzero if the current function uses the TOC. This is
determined by the presence of (use (unspec ... UNSPEC_TOC)), which
is generated by the ABI_V4 load_toc_* patterns. */
#if TARGET_ELF
static int
-uses_TOC (void)
+uses_TOC (void)
{
rtx insn;
@@ -11578,7 +13790,7 @@ uses_TOC (void)
rtx pat = PATTERN (insn);
int i;
- if (GET_CODE (pat) == PARALLEL)
+ if (GET_CODE (pat) == PARALLEL)
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx sub = XVECEXP (pat, 0, i);
@@ -11596,14 +13808,14 @@ uses_TOC (void)
#endif
rtx
-create_TOC_reference (rtx symbol)
+create_TOC_reference (rtx symbol)
{
if (no_new_pseudos)
regs_ever_live[TOC_REGISTER] = 1;
- return gen_rtx_PLUS (Pmode,
+ return gen_rtx_PLUS (Pmode,
gen_rtx_REG (Pmode, TOC_REGISTER),
- gen_rtx_CONST (Pmode,
- gen_rtx_MINUS (Pmode, symbol,
+ gen_rtx_CONST (Pmode,
+ gen_rtx_MINUS (Pmode, symbol,
gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
}
@@ -11621,12 +13833,12 @@ rs6000_aix_emit_builtin_unwind_init (void)
rtx tocompare = gen_reg_rtx (SImode);
rtx no_toc_save_needed = gen_label_rtx ();
- mem = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+ mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
emit_move_insn (stack_top, mem);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (2 * GET_MODE_SIZE (Pmode))));
emit_move_insn (opcode_addr, mem);
emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
@@ -11636,22 +13848,22 @@ rs6000_aix_emit_builtin_unwind_init (void)
SImode, NULL_RTX, NULL_RTX,
no_toc_save_needed);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (5 * GET_MODE_SIZE (Pmode))));
emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
emit_label (no_toc_save_needed);
}
-/* This ties together stack memory (MEM with an alias set of
- rs6000_sr_alias_set) and the change to the stack pointer. */
+/* This ties together stack memory (MEM with an alias set of frame_alias_set)
+ and the change to the stack pointer. */
static void
rs6000_emit_stack_tie (void)
{
- rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
+ rtx mem = gen_frame_mem (BLKmode,
+ gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
- set_mem_alias_set (mem, rs6000_sr_alias_set);
emit_insn (gen_stack_tie (mem));
}
@@ -11665,12 +13877,19 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
rtx insn;
rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
rtx tmp_reg = gen_rtx_REG (Pmode, 0);
- rtx todec = GEN_INT (-size);
+ rtx todec = gen_int_mode (-size, Pmode);
+
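+  /* gen_int_mode truncates to Pmode; if the value did not survive the
+     round trip, the frame is too large to address, so trap at run time
+     rather than emit wrong code.  */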
+ if (INTVAL (todec) != -size)
+ {
+ warning (0, "stack frame too large");
+ emit_insn (gen_trap ());
+ return;
+ }
if (current_function_limit_stack)
{
if (REG_P (stack_limit_rtx)
- && REGNO (stack_limit_rtx) > 1
+ && REGNO (stack_limit_rtx) > 1
&& REGNO (stack_limit_rtx) <= 31)
{
emit_insn (TARGET_32BIT
@@ -11689,8 +13908,8 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
&& DEFAULT_ABI == ABI_V4)
{
rtx toload = gen_rtx_CONST (VOIDmode,
- gen_rtx_PLUS (Pmode,
- stack_limit_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_limit_rtx,
GEN_INT (size)));
emit_insn (gen_elf_high (tmp_reg, toload));
@@ -11699,7 +13918,7 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
const0_rtx));
}
else
- warning ("stack limit expression is not supported");
+ warning (0, "stack limit expression is not supported");
}
if (copy_r12 || ! TARGET_UPDATE)
@@ -11710,7 +13929,7 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
if (size > 32767)
{
/* Need a note here so that try_split doesn't get confused. */
- if (get_last_insn() == NULL_RTX)
+ if (get_last_insn () == NULL_RTX)
emit_note (NOTE_INSN_DELETED);
insn = emit_move_insn (tmp_reg, todec);
try_split (PATTERN (insn), insn, 0);
@@ -11720,7 +13939,7 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
insn = emit_insn (TARGET_32BIT
? gen_movsi_update (stack_reg, stack_reg,
todec, stack_reg)
- : gen_movdi_update (stack_reg, stack_reg,
+ : gen_movdi_di_update (stack_reg, stack_reg,
todec, stack_reg));
}
else
@@ -11731,11 +13950,11 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
gen_rtx_REG (Pmode, 12));
}
-
+
RTX_FRAME_RELATED_P (insn) = 1;
- REG_NOTES (insn) =
+ REG_NOTES (insn) =
gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_reg,
+ gen_rtx_SET (VOIDmode, stack_reg,
gen_rtx_PLUS (Pmode, stack_reg,
GEN_INT (-size))),
REG_NOTES (insn));
@@ -11748,7 +13967,7 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
its hand so much. */
static void
-rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
+rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
rtx reg2, rtx rreg)
{
rtx real, temp;
@@ -11765,12 +13984,12 @@ rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
if (reg2 != NULL_RTX)
real = replace_rtx (real, reg2, rreg);
-
- real = replace_rtx (real, reg,
+
+ real = replace_rtx (real, reg,
gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
STACK_POINTER_REGNUM),
GEN_INT (val)));
-
+
/* We expect that 'real' is either a SET or a PARALLEL containing
SETs (and possibly other stuff). In a PARALLEL, all the SETs
are important so they all have to be marked RTX_FRAME_RELATED_P. */
@@ -11778,7 +13997,7 @@ rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
if (GET_CODE (real) == SET)
{
rtx set = real;
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
@@ -11792,14 +14011,16 @@ rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
XEXP (SET_DEST (set), 0) = temp;
}
}
- else if (GET_CODE (real) == PARALLEL)
+ else
{
int i;
+
+ gcc_assert (GET_CODE (real) == PARALLEL);
for (i = 0; i < XVECLEN (real, 0); i++)
if (GET_CODE (XVECEXP (real, 0, i)) == SET)
{
rtx set = XVECEXP (real, 0, i);
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
@@ -11815,8 +14036,6 @@ rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
RTX_FRAME_RELATED_P (set) = 1;
}
}
- else
- abort ();
if (TARGET_SPE)
real = spe_synthesize_frame_save (real);
@@ -11845,10 +14064,9 @@ spe_synthesize_frame_save (rtx real)
This is so we can differentiate between 64-bit and 32-bit saves.
Words cannot describe this nastiness. */
- if (GET_CODE (SET_DEST (real)) != MEM
- || GET_CODE (XEXP (SET_DEST (real), 0)) != PLUS
- || GET_CODE (SET_SRC (real)) != REG)
- abort ();
+ gcc_assert (GET_CODE (SET_DEST (real)) == MEM
+ && GET_CODE (XEXP (SET_DEST (real), 0)) == PLUS
+ && GET_CODE (SET_SRC (real)) == REG);
/* Transform:
(set (mem (plus (reg x) (const y)))
@@ -11905,7 +14123,7 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
vrsave,
gen_rtx_UNSPEC_VOLATILE (SImode,
gen_rtvec (2, reg, vrsave),
- 30));
+ UNSPECV_SET_VRSAVE));
nclobs = 1;
@@ -11953,7 +14171,7 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
static void
-emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
+emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
unsigned int regno, int offset, HOST_WIDE_INT total_size)
{
rtx reg, offset_rtx, insn, mem, addr, int_rtx;
@@ -11963,12 +14181,13 @@ emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
/* Some cases that need register indexed addressing. */
if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode)
|| (TARGET_SPE_ABI
&& SPE_VECTOR_MODE (mode)
&& !SPE_CONST_OFFSET_OK (offset)))
{
/* Whoever calls us must make sure r11 is available in the
- flow path of instructions in the prologue. */
+ flow path of instructions in the prologue. */
offset_rtx = gen_rtx_REG (Pmode, 11);
emit_move_insn (offset_rtx, int_rtx);
@@ -11984,8 +14203,7 @@ emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
reg = gen_rtx_REG (mode, regno);
addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
- mem = gen_rtx_MEM (mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (mode, addr);
insn = emit_move_insn (mem, reg);
@@ -12002,7 +14220,8 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
int_rtx = GEN_INT (offset);
- if (TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
+ if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode))
{
offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
emit_move_insn (offset_rtx, int_rtx);
@@ -12010,9 +14229,26 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
else
offset_rtx = int_rtx;
- return gen_rtx_MEM (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
+ return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
+/* Look for user-defined global regs. We should not save and restore these,
+ and cannot use stmw/lmw if there are any in its range. */
+
+static bool
+no_global_regs_above (int first_greg)
+{
+ int i;
+ for (i = 0; i < 32 - first_greg; i++)
+ if (global_regs[first_greg + i])
+ return false;
+ return true;
+}
+
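+/* Targets that support fix-and-continue (Darwin) override this with a
+   run-time check; everywhere else the nop padding below is compiled out.  */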
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+
/* Emit function prologue as insns. */
void
@@ -12029,24 +14265,40 @@ rs6000_emit_prologue (void)
int saving_FPRs_inline;
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
-
- if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
- {
- reg_mode = V2SImode;
- reg_size = 8;
- }
+
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ /* GDB on Darwin arranges to forward a function from the old
+ address by modifying the first 5 instructions of the function
+ to branch to the overriding function. This is necessary to
+ permit function pointers that point to the old function to
+ actually forward to the new function. */
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
&& (!TARGET_SPE_ABI
|| info->spe_64bit_regs_used == 0)
- && info->first_gp_reg_save < 31);
+ && info->first_gp_reg_save < 31
+ && no_global_regs_above (info->first_gp_reg_save));
saving_FPRs_inline = (info->first_fp_reg_save == 64
|| FP_SAVE_INLINE (info->first_fp_reg_save)
|| current_function_calls_eh_return
|| cfun->machine->ra_need_lr);
/* For V.4, update stack before we do any saving and set back pointer. */
- if (info->push_p
+ if (! WORLD_SAVE_P (info)
+ && info->push_p
&& (DEFAULT_ABI == ABI_V4
|| current_function_calls_eh_return))
{
@@ -12054,7 +14306,7 @@ rs6000_emit_prologue (void)
sp_offset = info->total_size;
else
frame_reg_rtx = frame_ptr_rtx;
- rs6000_emit_allocate_stack (info->total_size,
+ rs6000_emit_allocate_stack (info->total_size,
(frame_reg_rtx != sp_reg_rtx
&& (info->cr_save_p
|| info->lr_save_p
@@ -12065,8 +14317,123 @@ rs6000_emit_prologue (void)
rs6000_emit_stack_tie ();
}
+ /* Handle world saves specially here. */
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j, sz;
+ rtx treg;
+ rtvec p;
+ rtx reg0;
+
+ /* save_world expects lr in r0. */
+ reg0 = gen_rtx_REG (Pmode, 0);
+ if (info->lr_save_p)
+ {
+ insn = emit_move_insn (reg0,
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
+ assumptions about the offsets of various bits of the stack
+ frame. */
+ gcc_assert (info->gp_save_offset == -220
+ && info->fp_save_offset == -144
+ && info->lr_save_offset == 8
+ && info->cr_save_offset == 4
+ && info->push_p
+ && info->lr_save_p
+ && (!current_function_calls_eh_return
+ || info->ehrd_offset == -432)
+ && info->vrsave_save_offset == -224
+ && info->altivec_save_offset == -416);
+
+ treg = gen_rtx_REG (SImode, 11);
+ emit_move_insn (treg, GEN_INT (-info->total_size));
+
+ /* SAVE_WORLD takes the caller's LR in R0 and the frame size
+ in R11. It also clobbers R12, so beware! */
+
+ /* Preserve CR2 for save_world prologues.  */
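+      /* Five fixed vector entries: the LR clobber, the save_world USE,
+         the CR2 save, the LR slot save, and the stack pointer update.  */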
+ sz = 5;
+ sz += 32 - info->first_gp_reg_save;
+ sz += 64 - info->first_fp_reg_save;
+ sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
+ p = rtvec_alloc (sz);
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ "*save_world"));
+ /* We do floats first so that the instruction pattern matches
+ properly. */
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8 * i));
+ rtx mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + sp_offset + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* R0 holds the saved LR; record its store into the LR slot.  */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
+ }
+ /* Explain what happens to the stack pointer. */
+ {
+ rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
+ }
+
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ treg, GEN_INT (-info->total_size));
+ sp_offset = info->total_size;
+ }
+
/* Save AltiVec registers if needed. */
- if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
{
int i;
@@ -12087,10 +14454,8 @@ rs6000_emit_prologue (void)
emit_move_insn (areg, GEN_INT (offset));
/* AltiVec addressing mode is [reg+reg]. */
- mem = gen_rtx_MEM (V4SImode,
- gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V4SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
insn = emit_move_insn (mem, savereg);
@@ -12122,13 +14487,15 @@ rs6000_emit_prologue (void)
else
emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
- /* Save VRSAVE. */
- offset = info->vrsave_save_offset + sp_offset;
- mem
- = gen_rtx_MEM (SImode,
- gen_rtx_PLUS (Pmode, frame_reg_rtx, GEN_INT (offset)));
- set_mem_alias_set (mem, rs6000_sr_alias_set);
- insn = emit_move_insn (mem, reg);
+ if (!WORLD_SAVE_P (info))
+ {
+ /* Save VRSAVE. */
+ offset = info->vrsave_save_offset + sp_offset;
+ mem = gen_frame_mem (SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (offset)));
+ insn = emit_move_insn (mem, reg);
+ }
/* Include the registers in the mask. */
emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
@@ -12137,40 +14504,57 @@ rs6000_emit_prologue (void)
}
/* If we use the link register, get it into r0. */
- if (info->lr_save_p)
- emit_move_insn (gen_rtx_REG (Pmode, 0),
- gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ if (!WORLD_SAVE_P (info) && info->lr_save_p)
+ {
+ insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
/* If we need to save CR, put it into r12. */
- if (info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
+ rtx set;
+
cr_save_rtx = gen_rtx_REG (SImode, 12);
- emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* Now, there's no way that dwarf2out_frame_debug_expr is going
+ to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
+ But that's OK. All we have to do is specify that _one_ condition
+ code register is saved in this stack slot. The thrower's epilogue
+ will then restore all the call-saved registers.
+ We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx,
+ gen_rtx_REG (SImode, CR2_REGNO));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
}
/* Do any required saving of fpr's. If only one or two to save, do
it ourselves. Otherwise, call function. */
- if (saving_FPRs_inline)
+ if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
{
int i;
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((regs_ever_live[info->first_fp_reg_save+i]
+ if ((regs_ever_live[info->first_fp_reg_save+i]
&& ! call_used_regs[info->first_fp_reg_save+i]))
emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
info->first_fp_reg_save + i,
info->fp_save_offset + sp_offset + 8 * i,
info->total_size);
}
- else if (info->first_fp_reg_save != 64)
+ else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
{
int i;
char rname[30];
const char *alloc_rname;
rtvec p;
p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
-
- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode,
+
+ RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
@@ -12183,21 +14567,20 @@ rs6000_emit_prologue (void)
rtx addr, reg, mem;
reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
+ GEN_INT (info->fp_save_offset
+ sp_offset + 8*i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
}
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
/* Save GPRs. This is done as a PARALLEL if we are using
the store-multiple instructions. */
- if (using_store_multiple)
+ if (!WORLD_SAVE_P (info) && using_store_multiple)
{
rtvec p;
int i;
@@ -12206,29 +14589,28 @@ rs6000_emit_prologue (void)
{
rtx addr, reg, mem;
reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
}
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
- else
+ else if (!WORLD_SAVE_P (info))
{
int i;
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- if ((regs_ever_live[info->first_gp_reg_save+i]
- && (! call_used_regs[info->first_gp_reg_save+i]
- || (i+info->first_gp_reg_save
+ if ((regs_ever_live[info->first_gp_reg_save + i]
+ && (!call_used_regs[info->first_gp_reg_save + i]
+ || (i + info->first_gp_reg_save
== RS6000_PIC_OFFSET_TABLE_REGNUM
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
- || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
{
@@ -12249,8 +14631,7 @@ rs6000_emit_prologue (void)
b = GEN_INT (offset);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
- mem = gen_rtx_MEM (V2SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V2SImode, addr);
insn = emit_move_insn (mem, reg);
if (GET_CODE (b) == CONST_INT)
@@ -12262,15 +14643,14 @@ rs6000_emit_prologue (void)
}
else
{
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
}
@@ -12290,11 +14670,10 @@ rs6000_emit_prologue (void)
reg = gen_rtx_REG (reg_mode, 2);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (sp_offset + 5 * reg_size));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
PATTERN (insn) = gen_blockage ();
}
@@ -12313,100 +14692,117 @@ rs6000_emit_prologue (void)
}
/* Save lr if we used it. */
- if (info->lr_save_p)
+ if (!WORLD_SAVE_P (info) && info->lr_save_p)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset + sp_offset));
rtx reg = gen_rtx_REG (Pmode, 0);
rtx mem = gen_rtx_MEM (Pmode, addr);
- /* This should not be of rs6000_sr_alias_set, because of
+ /* This should not be in frame_alias_set, because of
__builtin_return_address. */
-
+
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
- reg, gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
}
/* Save CR if we use any that must be preserved. */
- if (info->cr_save_p)
+ if (!WORLD_SAVE_P (info) && info->cr_save_p)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset + sp_offset));
- rtx mem = gen_rtx_MEM (SImode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (SImode, addr);
+ /* See the large comment above about why CR2_REGNO is used. */
+ rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
/* If r12 was used to hold the original sp, copy cr into r0 now
that it's free. */
if (REGNO (frame_reg_rtx) == 12)
{
+ rtx set;
+
cr_save_rtx = gen_rtx_REG (SImode, 0);
- emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
+
}
insn = emit_move_insn (mem, cr_save_rtx);
- /* Now, there's no way that dwarf2out_frame_debug_expr is going
- to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
- But that's OK. All we have to do is specify that _one_ condition
- code register is saved in this stack slot. The thrower's epilogue
- will then restore all the call-saved registers.
- We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
- cr_save_rtx, gen_rtx_REG (SImode, CR2_REGNO));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
}
- /* Update stack and set back pointer unless this is V.4,
+ /* Update stack and set back pointer unless this is V.4,
for which it was done previously. */
- if (info->push_p
+ if (!WORLD_SAVE_P (info) && info->push_p
&& !(DEFAULT_ABI == ABI_V4 || current_function_calls_eh_return))
rs6000_emit_allocate_stack (info->total_size, FALSE);
/* Set frame pointer, if needed. */
if (frame_pointer_needed)
{
- insn = emit_move_insn (gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),
+ insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
sp_reg_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
}
/* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
- || (DEFAULT_ABI == ABI_V4 && flag_pic == 1
+ || (DEFAULT_ABI == ABI_V4
+ && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
&& regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM]))
- {
- /* If emit_load_toc_table will use the link register, we need to save
- it. We use R12 for this purpose because emit_load_toc_table
- can use register 0. This allows us to use a plain 'blr' to return
- from the procedure more often. */
- int save_LR_around_toc_setup = (TARGET_ELF
- && DEFAULT_ABI != ABI_AIX
- && flag_pic
- && ! info->lr_save_p
- && EXIT_BLOCK_PTR->pred != NULL);
- if (save_LR_around_toc_setup)
- {
- rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
- rs6000_maybe_dead (emit_move_insn (frame_ptr_rtx, lr));
+ {
+ /* If emit_load_toc_table will use the link register, we need to save
+ it. We use R12 for this purpose because emit_load_toc_table
+ can use register 0. This allows us to use a plain 'blr' to return
+ from the procedure more often. */
+ int save_LR_around_toc_setup = (TARGET_ELF
+ && DEFAULT_ABI != ABI_AIX
+ && flag_pic
+ && ! info->lr_save_p
+ && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
+ if (save_LR_around_toc_setup)
+ {
+ rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+
+ insn = emit_move_insn (frame_ptr_rtx, lr);
+ rs6000_maybe_dead (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ rs6000_emit_load_toc_table (TRUE);
+
+ insn = emit_move_insn (lr, frame_ptr_rtx);
+ rs6000_maybe_dead (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
rs6000_emit_load_toc_table (TRUE);
- rs6000_maybe_dead (emit_move_insn (lr, frame_ptr_rtx));
- }
- else
- rs6000_emit_load_toc_table (TRUE);
- }
+ }
#if TARGET_MACHO
if (DEFAULT_ABI == ABI_DARWIN
&& flag_pic && current_function_uses_pic_offset_table)
{
- rtx dest = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
- const char *picbase = machopic_function_base_name ();
- rtx src = gen_rtx_SYMBOL_REF (Pmode, picbase);
+ rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+ rtx src = machopic_function_base_sym ();
+
+ /* Save and restore LR locally around this call (in R0). */
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (gen_rtx_REG (Pmode, 0), lr));
+
+ rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (lr, src)));
- rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest, src)));
+ insn = emit_move_insn (gen_rtx_REG (Pmode,
+ RS6000_PIC_OFFSET_TABLE_REGNUM),
+ lr);
+ rs6000_maybe_dead (insn);
- rs6000_maybe_dead (
- emit_move_insn (gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM),
- gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)));
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (lr, gen_rtx_REG (Pmode, 0)));
}
#endif
}
@@ -12414,7 +14810,7 @@ rs6000_emit_prologue (void)
/* Write function prologue. */
static void
-rs6000_output_function_prologue (FILE *file,
+rs6000_output_function_prologue (FILE *file,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
rs6000_stack_t *info = rs6000_stack_info ();
@@ -12466,13 +14862,13 @@ rs6000_output_function_prologue (FILE *file,
if (TARGET_DEBUG_STACK)
debug_rtx_list (get_insns (), 100);
- final (get_insns (), file, FALSE, FALSE);
+ final (get_insns (), file, FALSE);
end_sequence ();
}
rs6000_pic_labelno++;
}
-
+
/* Emit function epilogue as insns.
At present, dwarf2out_frame_debug_expr doesn't understand
@@ -12506,12 +14902,13 @@ rs6000_emit_epilogue (int sibcall)
using_load_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
&& (!TARGET_SPE_ABI
|| info->spe_64bit_regs_used == 0)
- && info->first_gp_reg_save < 31);
+ && info->first_gp_reg_save < 31
+ && no_global_regs_above (info->first_gp_reg_save));
restoring_FPRs_inline = (sibcall
|| current_function_calls_eh_return
|| info->first_fp_reg_save == 64
|| FP_SAVE_INLINE (info->first_fp_reg_save));
- use_backchain_to_restore_sp = (frame_pointer_needed
+ use_backchain_to_restore_sp = (frame_pointer_needed
|| current_function_calls_alloca
|| info->total_size > 32767);
using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
@@ -12519,6 +14916,97 @@ rs6000_emit_epilogue (int sibcall)
|| rs6000_cpu == PROCESSOR_PPC750
|| optimize_size);
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+
+ /* eh_rest_world_r10 will return to the location saved in the LR
+ stack slot (which is not likely to be our caller).
+ Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
+ rest_world is similar, except any R10 parameter is ignored.
+ The exception-handling stuff that was here in 2.95 is no
+ longer necessary. */
+
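+      /* Nine fixed entries -- the return, the LR and symbol USEs, the
+         r11/r0/r12/r7/r8 clobbers and the CR2 restore -- plus the USE
+         of r10 and one SET per restored GPR, AltiVec and FP register.  */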
+ p = rtvec_alloc (9
+ + 1
+ + 32 - info->first_gp_reg_save
+ + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ + 63 + 1 - info->first_fp_reg_save);
+
+ strcpy (rname, ((current_function_calls_eh_return) ?
+ "*eh_rest_world_r10" : "*rest_world"));
+ alloc_rname = ggc_strdup (rname);
+
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
+ /* The instruction pattern requires a clobber here;
+ it is shared with the restVEC helper. */
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_fp_reg_save + i <= 63; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + 8 * i));
+ rtx mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+
+ return;
+ }
+
/* If we have a frame pointer, a call to alloca, or a large stack
frame, restore the old stack pointer using the backchain. Otherwise,
we know what size to update it with. */
@@ -12531,7 +15019,6 @@ rs6000_emit_epilogue (int sibcall)
emit_move_insn (frame_reg_rtx,
gen_rtx_MEM (Pmode, sp_reg_rtx));
-
}
else if (info->push_p)
{
@@ -12547,7 +15034,7 @@ rs6000_emit_epilogue (int sibcall)
GEN_INT (info->total_size)));
}
}
-
+
/* Restore AltiVec registers if needed. */
if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
{
@@ -12566,8 +15053,7 @@ rs6000_emit_epilogue (int sibcall)
/* AltiVec addressing mode is [reg+reg]. */
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
- mem = gen_rtx_MEM (V4SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V4SImode, addr);
emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
}
@@ -12581,8 +15067,7 @@ rs6000_emit_epilogue (int sibcall)
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->vrsave_save_offset + sp_offset));
- mem = gen_rtx_MEM (SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (SImode, addr);
reg = gen_rtx_REG (SImode, 12);
emit_move_insn (reg, mem);
@@ -12595,28 +15080,24 @@ rs6000_emit_epilogue (int sibcall)
rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
info->lr_save_offset + sp_offset);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
-
emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
}
-
+
/* Get the old cr if we saved it. */
if (info->cr_save_p)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset + sp_offset));
- rtx mem = gen_rtx_MEM (SImode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (SImode, addr);
emit_move_insn (gen_rtx_REG (SImode, 12), mem);
}
-
+
/* Set LR here to try to overlap restores below. */
if (info->lr_save_p)
emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
gen_rtx_REG (Pmode, 0));
-
+
/* Load exception handler data registers, if needed. */
if (current_function_calls_eh_return)
{
@@ -12626,9 +15107,7 @@ rs6000_emit_epilogue (int sibcall)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (sp_offset + 5 * reg_size));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
}
@@ -12644,12 +15123,11 @@ rs6000_emit_epilogue (int sibcall)
mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
info->ehrd_offset + sp_offset
+ reg_size * (int) i);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
}
}
-
+
/* Restore GPRs. This is done as a PARALLEL if we are using
the load-multiple instructions. */
if (using_load_multiple)
@@ -12658,15 +15136,13 @@ rs6000_emit_epilogue (int sibcall)
p = rtvec_alloc (32 - info->first_gp_reg_save);
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
{
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
- RTVEC_ELT (p, i) =
+ RTVEC_ELT (p, i) =
gen_rtx_SET (VOIDmode,
gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
mem);
@@ -12675,19 +15151,19 @@ rs6000_emit_epilogue (int sibcall)
}
else
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- if ((regs_ever_live[info->first_gp_reg_save+i]
- && (! call_used_regs[info->first_gp_reg_save+i]
- || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ if ((regs_ever_live[info->first_gp_reg_save + i]
+ && (!call_used_regs[info->first_gp_reg_save + i]
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
- || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
{
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
+ rtx mem = gen_frame_mem (reg_mode, addr);
/* Restore 64-bit quantities for SPE. */
if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
@@ -12704,30 +15180,27 @@ rs6000_emit_epilogue (int sibcall)
b = GEN_INT (offset);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
- mem = gen_rtx_MEM (V2SImode, addr);
+ mem = gen_frame_mem (V2SImode, addr);
}
- set_mem_alias_set (mem, rs6000_sr_alias_set);
-
- emit_move_insn (gen_rtx_REG (reg_mode,
+ emit_move_insn (gen_rtx_REG (reg_mode,
info->first_gp_reg_save + i), mem);
}
/* Restore fpr's if we need to do it without calling a function. */
if (restoring_FPRs_inline)
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((regs_ever_live[info->first_fp_reg_save+i]
+ if ((regs_ever_live[info->first_fp_reg_save+i]
&& ! call_used_regs[info->first_fp_reg_save+i]))
{
rtx addr, mem;
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
- + sp_offset
+ GEN_INT (info->fp_save_offset
+ + sp_offset
+ 8 * i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
- emit_move_insn (gen_rtx_REG (DFmode,
+ emit_move_insn (gen_rtx_REG (DFmode,
info->first_fp_reg_save + i),
mem);
}
@@ -12737,21 +15210,20 @@ rs6000_emit_epilogue (int sibcall)
{
rtx r12_rtx = gen_rtx_REG (SImode, 12);
int count = 0;
-
+
if (using_mfcr_multiple)
{
for (i = 0; i < 8; i++)
if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
count++;
- if (count == 0)
- abort ();
+ gcc_assert (count);
}
if (using_mfcr_multiple && count > 1)
{
rtvec p;
int ndx;
-
+
p = rtvec_alloc (count);
ndx = 0;
@@ -12762,49 +15234,38 @@ rs6000_emit_epilogue (int sibcall)
RTVEC_ELT (r, 0) = r12_rtx;
RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
RTVEC_ELT (p, ndx) =
- gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
ndx++;
}
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
- if (ndx != count)
- abort ();
+ gcc_assert (ndx == count);
}
else
for (i = 0; i < 8; i++)
if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
{
- emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
+ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
CR0_REGNO+i),
r12_rtx));
}
}
/* If this is V.4, unwind the stack pointer after all of the loads
- have been done. We need to emit a block here so that sched
- doesn't decide to move the sp change before the register restores
- (which may not have any obvious dependency on the stack). This
- doesn't hurt performance, because there is no scheduling that can
- be done after this point. */
- if (DEFAULT_ABI == ABI_V4
- || current_function_calls_eh_return)
+ have been done. */
+ if (frame_reg_rtx != sp_reg_rtx)
{
- if (frame_reg_rtx != sp_reg_rtx)
- rs6000_emit_stack_tie ();
-
- if (use_backchain_to_restore_sp)
- {
- emit_move_insn (sp_reg_rtx, frame_reg_rtx);
- }
- else if (sp_offset != 0)
- {
- emit_insn (TARGET_32BIT
- ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
- GEN_INT (sp_offset))
- : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
- GEN_INT (sp_offset)));
- }
+ /* This blockage is needed so that sched doesn't decide to move
+ the sp change before the register restores. */
+ rs6000_emit_stack_tie ();
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
}
+ else if (sp_offset != 0)
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset))
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset)));
if (current_function_calls_eh_return)
{
@@ -12823,8 +15284,8 @@ rs6000_emit_epilogue (int sibcall)
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
- gen_rtx_REG (Pmode,
+ RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
/* If we have to restore more than two FP registers, branch to the
@@ -12835,7 +15296,7 @@ rs6000_emit_epilogue (int sibcall)
char rname[30];
const char *alloc_rname;
- sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
+ sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
alloc_rname = ggc_strdup (rname);
RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
@@ -12847,16 +15308,15 @@ rs6000_emit_epilogue (int sibcall)
rtx addr, mem;
addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
GEN_INT (info->fp_save_offset + 8*i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
- RTVEC_ELT (p, i+3) =
+ RTVEC_ELT (p, i+3) =
gen_rtx_SET (VOIDmode,
gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
mem);
}
}
-
+
emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
}
}
@@ -12864,11 +15324,9 @@ rs6000_emit_epilogue (int sibcall)
/* Write function epilogue. */
static void
-rs6000_output_function_epilogue (FILE *file,
+rs6000_output_function_epilogue (FILE *file,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
- rs6000_stack_t *info = rs6000_stack_info ();
-
if (! HAVE_epilogue)
{
rtx insn = get_last_insn ();
@@ -12901,7 +15359,7 @@ rs6000_output_function_epilogue (FILE *file,
if (TARGET_DEBUG_STACK)
debug_rtx_list (get_insns (), 100);
- final (get_insns (), file, FALSE, FALSE);
+ final (get_insns (), file, FALSE);
end_sequence ();
}
}
@@ -12916,8 +15374,8 @@ rs6000_output_function_epilogue (FILE *file,
&& NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
insn = PREV_INSN (insn);
- if (insn
- && (LABEL_P (insn)
+ if (insn
+ && (LABEL_P (insn)
|| (NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
fputs ("\tnop\n", file);
@@ -12939,13 +15397,14 @@ rs6000_output_function_epilogue (FILE *file,
System V.4 PowerPCs (and the embedded ABI derived from it) use a
different traceback table. */
if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
- && rs6000_traceback != traceback_none)
+ && rs6000_traceback != traceback_none && !current_function_is_thunk)
{
const char *fname = NULL;
const char *language_string = lang_hooks.name;
int fixed_parms = 0, float_parms = 0, parm_info = 0;
int i;
int optional_tbtab;
+ rs6000_stack_t *info = rs6000_stack_info ();
if (rs6000_traceback == traceback_full)
optional_tbtab = 1;
@@ -12986,23 +15445,26 @@ rs6000_output_function_epilogue (FILE *file,
official way to discover the language being compiled, so we
use language_string.
C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
- Java is 13. Objective-C is 14. */
+ Java is 13. Objective-C is 14. Objective-C++ isn't assigned
+ a number, so for now use 9. */
if (! strcmp (language_string, "GNU C"))
i = 0;
- else if (! strcmp (language_string, "GNU F77"))
+ else if (! strcmp (language_string, "GNU F77")
+ || ! strcmp (language_string, "GNU F95"))
i = 1;
else if (! strcmp (language_string, "GNU Pascal"))
i = 2;
else if (! strcmp (language_string, "GNU Ada"))
i = 3;
- else if (! strcmp (language_string, "GNU C++"))
+ else if (! strcmp (language_string, "GNU C++")
+ || ! strcmp (language_string, "GNU Objective-C++"))
i = 9;
else if (! strcmp (language_string, "GNU Java"))
i = 13;
else if (! strcmp (language_string, "GNU Objective-C"))
i = 14;
else
- abort ();
+ gcc_unreachable ();
fprintf (file, "%d,", i);
/* 8 single bit fields: global linkage (not set for C extern linkage,
@@ -13049,18 +15511,26 @@ rs6000_output_function_epilogue (FILE *file,
if (GET_CODE (parameter) == REG)
{
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (SCALAR_FLOAT_MODE_P (mode))
{
int bits;
float_parms++;
- if (mode == SFmode)
- bits = 0x2;
- else if (mode == DFmode || mode == TFmode)
- bits = 0x3;
- else
- abort ();
+ switch (mode)
+ {
+ case SFmode:
+ bits = 0x2;
+ break;
+
+ case DFmode:
+ case TFmode:
+ bits = 0x3;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
/* If only one bit will fit, don't OR in this entry. */
if (next_parm_info_bit > 0)
@@ -13110,17 +15580,12 @@ rs6000_output_function_epilogue (FILE *file,
/* Offset from start of code to tb table. */
fputs ("\t.long ", file);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
-#if TARGET_AIX
- RS6000_OUTPUT_BASENAME (file, fname);
-#else
- assemble_name (file, fname);
-#endif
- fputs ("-.", file);
-#if TARGET_AIX
- RS6000_OUTPUT_BASENAME (file, fname);
-#else
- assemble_name (file, fname);
-#endif
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+ putc ('-', file);
+ rs6000_output_function_entry (file, fname);
putc ('\n', file);
/* Interrupt handler mask. */
@@ -13183,8 +15648,8 @@ rs6000_output_function_epilogue (FILE *file,
not support varargs. */
static void
-rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
- HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
rtx this, insn, funexp;
@@ -13192,6 +15657,7 @@ rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
reload_completed = 1;
epilogue_completed = 1;
no_new_pseudos = 1;
+ reset_block_changes ();
/* Mark the end of the (empty) prologue. */
emit_note (NOTE_INSN_PROLOGUE_END);
@@ -13252,7 +15718,7 @@ rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
#endif
/* gen_sibcall expects reload to convert scratch pseudo to LR so we must
- generate sibcall RTL explicitly to avoid constraint abort. */
+ generate sibcall RTL explicitly. */
insn = emit_call_insn (
gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (4,
@@ -13274,7 +15740,7 @@ rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
insn_locators_initialize ();
shorten_branches (insn);
final_start_function (insn, file, 1);
- final (insn, file, 1, 0);
+ final (insn, file, 1);
final_end_function ();
reload_completed = 0;
@@ -13285,7 +15751,7 @@ rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
/* A quick summary of the various types of 'constant-pool tables'
under PowerPC:
- Target Flags Name One table per
+ Target Flags Name One table per
AIX (none) AIX TOC object file
AIX -mfull-toc AIX TOC object file
AIX -mminimal-toc AIX minimal TOC translation unit
@@ -13294,7 +15760,7 @@ rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
SVR4/EABI -fPIC SVR4 PIC translation unit
SVR4/EABI -mrelocatable EABI TOC function
SVR4/EABI -maix AIX TOC object file
- SVR4/EABI -maix -mminimal-toc
+ SVR4/EABI -maix -mminimal-toc
AIX minimal TOC translation unit
Name Reg. Set by entries contains:
@@ -13319,7 +15785,7 @@ rs6000_hash_constant (rtx k)
unsigned result = (code << 3) ^ mode;
const char *format;
int flen, fidx;
-
+
format = GET_RTX_FORMAT (code);
flen = strlen (format);
fidx = 0;
@@ -13370,7 +15836,7 @@ rs6000_hash_constant (rtx k)
else
{
size_t i;
- for (i = 0; i < sizeof(HOST_WIDE_INT)/sizeof(unsigned); i++)
+ for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
result = result * 613 + (unsigned) (XWINT (k, fidx)
>> CHAR_BIT * i);
}
@@ -13378,7 +15844,7 @@ rs6000_hash_constant (rtx k)
case '0':
break;
default:
- abort ();
+ gcc_unreachable ();
}
return result;
@@ -13387,7 +15853,7 @@ rs6000_hash_constant (rtx k)
static unsigned
toc_hash_function (const void *hash_entry)
{
- const struct toc_hash_struct *thc =
+ const struct toc_hash_struct *thc =
(const struct toc_hash_struct *) hash_entry;
return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}
@@ -13413,11 +15879,11 @@ toc_hash_eq (const void *h1, const void *h2)
to whether or not an object is a vtable. */
#define VTABLE_NAME_P(NAME) \
- (strncmp ("_vt.", name, strlen("_vt.")) == 0 \
+ (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
|| strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
|| strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
|| strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
- || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
+ || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
void
rs6000_output_symbol_ref (FILE *file, rtx x)
@@ -13425,12 +15891,12 @@ rs6000_output_symbol_ref (FILE *file, rtx x)
/* Currently C++ toc references to vtables can be emitted before it
is decided whether the vtable is public or private. If this is
the case, then the linker will eventually complain that there is
- a reference to an unknown section. Thus, for vtables only,
+ a reference to an unknown section. Thus, for vtables only,
we emit the TOC reference to reference the symbol and not the
section. */
const char *name = XSTR (x, 0);
- if (VTABLE_NAME_P (name))
+ if (VTABLE_NAME_P (name))
{
RS6000_OUTPUT_BASENAME (file, name);
}
@@ -13448,10 +15914,9 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
const char *name = buf;
const char *real_name;
rtx base = x;
- int offset = 0;
+ HOST_WIDE_INT offset = 0;
- if (TARGET_NO_TOC)
- abort ();
+ gcc_assert (!TARGET_NO_TOC);
/* When the linker won't eliminate them, don't output duplicate
TOC entries (this happens on AIX if there is any kind of TOC,
@@ -13461,29 +15926,29 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
struct toc_hash_struct *h;
void * * found;
-
+
/* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
- time because GGC is not initialized at that point. */
+ time because GGC is not initialized at that point. */
if (toc_hash_table == NULL)
- toc_hash_table = htab_create_ggc (1021, toc_hash_function,
+ toc_hash_table = htab_create_ggc (1021, toc_hash_function,
toc_hash_eq, NULL);
h = ggc_alloc (sizeof (*h));
h->key = x;
h->key_mode = mode;
h->labelno = labelno;
-
+
found = htab_find_slot (toc_hash_table, h, 1);
if (*found == NULL)
*found = h;
- else /* This is indeed a duplicate.
+ else /* This is indeed a duplicate.
Set this label equal to that label. */
{
fputs ("\t.set ", file);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
fprintf (file, "%d,", labelno);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
- fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
+ fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
found)->labelno));
return;
}
@@ -13503,13 +15968,17 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
/* Handle FP constants specially. Note that if we have a minimal
TOC, things we put here aren't actually in the TOC, so we can allow
FP constants. */
- if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == TFmode)
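+  /* Decimal float constants need the decimal128/64/32 images rather
+     than the binary encodings; handle the decimal modes alongside the
+     binary ones here and in the two cases below.  */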
+  if (GET_CODE (x) == CONST_DOUBLE
+      && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
{
REAL_VALUE_TYPE rv;
long k[4];
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
if (TARGET_64BIT)
{
@@ -13538,13 +16007,18 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
return;
}
}
- else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
+  else if (GET_CODE (x) == CONST_DOUBLE
+           && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
{
REAL_VALUE_TYPE rv;
long k[2];
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
if (TARGET_64BIT)
{
@@ -13569,13 +16043,17 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
return;
}
}
- else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
+  else if (GET_CODE (x) == CONST_DOUBLE
+           && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
{
REAL_VALUE_TYPE rv;
long l;
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
if (TARGET_64BIT)
{
@@ -13615,8 +16093,8 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
}
#else
{
- low = INTVAL (x) & 0xffffffff;
- high = (HOST_WIDE_INT) INTVAL (x) >> 32;
+ low = INTVAL (x) & 0xffffffff;
+ high = (HOST_WIDE_INT) INTVAL (x) >> 32;
}
#endif
@@ -13630,8 +16108,8 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
For a 32-bit target, CONST_INT values are loaded and shifted
entirely within `low' and can be stored in one TOC entry. */
- if (TARGET_64BIT && POINTER_SIZE < GET_MODE_BITSIZE (mode))
- abort ();/* It would be easy to make this work, but it doesn't now. */
+ /* It would be easy to make this work, but it doesn't now. */
+ gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
{
@@ -13683,21 +16161,30 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
if (GET_CODE (x) == CONST)
{
- if (GET_CODE (XEXP (x, 0)) != PLUS)
- abort ();
+ gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);
base = XEXP (XEXP (x, 0), 0);
offset = INTVAL (XEXP (XEXP (x, 0), 1));
}
-
- if (GET_CODE (base) == SYMBOL_REF)
- name = XSTR (base, 0);
- else if (GET_CODE (base) == LABEL_REF)
- ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (base, 0)));
- else if (GET_CODE (base) == CODE_LABEL)
- ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
- else
- abort ();
+
+ switch (GET_CODE (base))
+ {
+ case SYMBOL_REF:
+ name = XSTR (base, 0);
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L",
+ CODE_LABEL_NUMBER (XEXP (base, 0)));
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
real_name = (*targetm.strip_name_encoding) (name);
if (TARGET_MINIMAL_TOC)
@@ -13707,9 +16194,9 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
fprintf (file, "\t.tc %s", real_name);
if (offset < 0)
- fprintf (file, ".N%d", - offset);
+ fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
else if (offset)
- fprintf (file, ".P%d", offset);
+ fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
fputs ("[TC],", file);
}
@@ -13724,9 +16211,9 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
RS6000_OUTPUT_BASENAME (file, name);
if (offset < 0)
- fprintf (file, "%d", offset);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
else if (offset > 0)
- fprintf (file, "+%d", offset);
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
}
else
output_addr_const (file, x);
@@ -13813,8 +16300,8 @@ output_ascii (FILE *file, const char *p, int n)
the name. */
void
-rs6000_gen_section_name (char **buf, const char *filename,
- const char *section_desc)
+rs6000_gen_section_name (char **buf, const char *filename,
+ const char *section_desc)
{
const char *q, *after_last_slash, *last_period = 0;
char *p;
@@ -13838,14 +16325,14 @@ rs6000_gen_section_name (char **buf, const char *filename,
for (q = after_last_slash; *q; q++)
{
if (q == last_period)
- {
+ {
strcpy (p, section_desc);
p += strlen (section_desc);
break;
- }
+ }
else if (ISALNUM (*q))
- *p++ = *q;
+ *p++ = *q;
}
if (last_period == 0)
@@ -13859,6 +16346,10 @@ rs6000_gen_section_name (char **buf, const char *filename,
void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
+ /* Non-standard profiling for kernels, which just saves LR then calls
+ _mcount without worrying about arg saves. The idea is to change
+ the function prologue as little as possible, since it isn't easy to
+ account for arg save/restore code added just for _mcount. */
if (TARGET_PROFILE_KERNEL)
return;
@@ -13867,7 +16358,7 @@ output_profile_hook (int labelno ATTRIBUTE_UNUSED)
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
- if (NO_PROFILE_COUNTERS)
+ if (NO_PROFILE_COUNTERS)
emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
else
{
@@ -13894,12 +16385,9 @@ output_profile_hook (int labelno ATTRIBUTE_UNUSED)
#if TARGET_MACHO
/* For PIC code, set up a stub and collect the caller's address
from r0, which is where the prologue puts it. */
- if (MACHOPIC_INDIRECT)
- {
- mcount_name = machopic_stub_name (mcount_name);
- if (current_function_uses_pic_offset_table)
- caller_addr_regno = 0;
- }
+ if (MACHOPIC_INDIRECT
+ && current_function_uses_pic_offset_table)
+ caller_addr_regno = 0;
#endif
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
0, VOIDmode, 1,
@@ -13913,27 +16401,42 @@ void
output_function_profiler (FILE *file, int labelno)
{
char buf[100];
- int save_lr = 8;
switch (DEFAULT_ABI)
{
default:
- abort ();
+ gcc_unreachable ();
case ABI_V4:
- save_lr = 4;
if (!TARGET_32BIT)
{
- warning ("no profiling of 64-bit code for this ABI");
+ warning (0, "no profiling of 64-bit code for this ABI");
return;
}
ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
fprintf (file, "\tmflr %s\n", reg_names[0]);
- if (flag_pic == 1)
+ if (NO_PROFILE_COUNTERS)
+ {
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ }
+ else if (TARGET_SECURE_PLT && flag_pic)
+ {
+ asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
+ asm_fprintf (file, "\t{cau|addis} %s,%s,",
+ reg_names[12], reg_names[12]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
+ }
+ else if (flag_pic == 1)
{
fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
- asm_fprintf (file, "\t{st|stw} %s,%d(%s)\n",
- reg_names[0], save_lr, reg_names[1]);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
assemble_name (file, buf);
@@ -13941,14 +16444,14 @@ output_function_profiler (FILE *file, int labelno)
}
else if (flag_pic > 1)
{
- asm_fprintf (file, "\t{st|stw} %s,%d(%s)\n",
- reg_names[0], save_lr, reg_names[1]);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
/* Now, we need to get the address of the label. */
- fputs ("\tbl 1f\n\t.long ", file);
+ fputs ("\tbcl 20,31,1f\n\t.long ", file);
assemble_name (file, buf);
fputs ("-.\n1:", file);
asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
- asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
+ asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
reg_names[0], reg_names[11]);
asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
reg_names[0], reg_names[0], reg_names[11]);
@@ -13958,8 +16461,8 @@ output_function_profiler (FILE *file, int labelno)
asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
assemble_name (file, buf);
fputs ("@ha\n", file);
- asm_fprintf (file, "\t{st|stw} %s,%d(%s)\n",
- reg_names[0], save_lr, reg_names[1]);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
assemble_name (file, buf);
asm_fprintf (file, "@l(%s)\n", reg_names[12]);
@@ -13968,7 +16471,6 @@ output_function_profiler (FILE *file, int labelno)
/* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
fprintf (file, "\tbl %s%s\n",
RS6000_MCOUNT, flag_pic ? "@plt" : "");
-
break;
case ABI_AIX:
@@ -13979,13 +16481,12 @@ output_function_profiler (FILE *file, int labelno)
}
else
{
- if (TARGET_32BIT)
- abort ();
+ gcc_assert (!TARGET_32BIT);
asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
- if (current_function_needs_context)
+ if (cfun->static_chain_decl != NULL)
{
asm_fprintf (file, "\tstd %s,24(%s)\n",
reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
@@ -14001,12 +16502,6 @@ output_function_profiler (FILE *file, int labelno)
}
-static int
-rs6000_use_dfa_pipeline_interface (void)
-{
- return 1;
-}
-
/* Power4 load update and store update instructions are cracked into a
load or store and an integer insn which are executed in the same cycle.
Branches have their own dispatch slot which does not count against the
@@ -14014,8 +16509,8 @@ rs6000_use_dfa_pipeline_interface (void)
instructions to issue in this cycle. */
static int
-rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
- int verbose ATTRIBUTE_UNUSED,
+rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
rtx insn, int more)
{
if (GET_CODE (PATTERN (insn)) == USE
@@ -14025,9 +16520,9 @@ rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
if (rs6000_sched_groups)
{
if (is_microcoded_insn (insn))
- return 0;
+ return 0;
else if (is_cracked_insn (insn))
- return more > 2 ? more - 2 : 0;
+ return more > 2 ? more - 2 : 0;
}
return more - 1;
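/* The slot accounting above, restated as a self-contained model of the
   rs6000_sched_groups path; the two flags stand in for
   is_microcoded_insn and is_cracked_insn, and the name is illustrative.  */
static int
model_variable_issue (int microcoded, int cracked, int more)
{
  if (microcoded)
    return 0;                        /* ends the dispatch group */
  if (cracked)
    return more > 2 ? more - 2 : 0;  /* uses two issue slots */
  return more - 1;                   /* uses one issue slot */
}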
@@ -14037,8 +16532,7 @@ rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn ATTRIBUTE_UNUSED,
- int cost)
+rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
if (! recog_memoized (insn))
return 0;
@@ -14050,6 +16544,17 @@ rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn ATTRIBUTE_UNUSED,
{
/* Data dependency; DEP_INSN writes a register that INSN reads
some cycles later. */
+
+ /* Separate a load from a narrower, dependent store. */
+ if (rs6000_sched_groups
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
+ && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
+ && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
+ > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
+ return cost + 14;
+
switch (get_attr_type (insn))
{
case TYPE_JMPREG:
@@ -14111,7 +16616,7 @@ is_microcoded_insn (rtx insn)
|| type == TYPE_LOAD_UX
|| type == TYPE_STORE_UX
|| type == TYPE_MFCR)
- return true;
+ return true;
}
return false;
@@ -14153,6 +16658,11 @@ is_dispatch_slot_restricted (rtx insn)
case TYPE_IDIV:
case TYPE_LDIV:
return 2;
+ case TYPE_LOAD_L:
+ case TYPE_STORE_C:
+ case TYPE_ISYNC:
+ case TYPE_SYNC:
+ return 4;
default:
if (rs6000_cpu == PROCESSOR_POWER5
&& is_cracked_insn (insn))
@@ -14176,14 +16686,14 @@ is_cracked_insn (rtx insn)
{
enum attr_type type = get_attr_type (insn);
if (type == TYPE_LOAD_U || type == TYPE_STORE_U
- || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
- || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
- || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
- || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
- || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
- || type == TYPE_IDIV || type == TYPE_LDIV
- || type == TYPE_INSERT_WORD)
- return true;
+ || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
+ || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
+ || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
+ || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
+ || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
+ || type == TYPE_IDIV || type == TYPE_LDIV
+ || type == TYPE_INSERT_WORD)
+ return true;
}
return false;
@@ -14204,7 +16714,7 @@ is_branch_slot_insn (rtx insn)
{
enum attr_type type = get_attr_type (insn);
if (type == TYPE_BRANCH || type == TYPE_JMPREG)
- return true;
+ return true;
return false;
}
@@ -14252,22 +16762,23 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
if (is_dispatch_slot_restricted (insn)
&& reload_completed
- && current_sched_info->sched_max_insns_priority
+ && current_sched_info->sched_max_insns_priority
&& rs6000_sched_restricted_insns_priority)
{
- /* Prioritize insns that can be dispatched only in the first dispatch slot. */
+ /* Prioritize insns that can be dispatched only in the first
+ dispatch slot. */
if (rs6000_sched_restricted_insns_priority == 1)
- /* Attach highest priority to insn. This means that in
- haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
+ /* Attach highest priority to insn. This means that in
+ haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
precede 'priority' (critical path) considerations. */
- return current_sched_info->sched_max_insns_priority;
+ return current_sched_info->sched_max_insns_priority;
else if (rs6000_sched_restricted_insns_priority == 2)
- /* Increase priority of insn by a minimal amount. This means that in
- haifa-sched.c:ready_sort(), only 'priority' (critical path) considerations
- precede dispatch-slot restriction considerations. */
- return (priority + 1);
- }
+ /* Increase priority of insn by a minimal amount. This means that in
+ haifa-sched.c:ready_sort(), only 'priority' (critical path)
+ considerations precede dispatch-slot restriction considerations. */
+ return (priority + 1);
+ }
return priority;
}
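/* The two -mprioritize-restricted-insns policies above, condensed into
   a model (names are illustrative): policy 1 lifts a slot-restricted
   insn to the scheduler's priority ceiling, policy 2 only nudges it
   past insns of equal priority.  */
static int
model_adjust_priority (int policy, int priority, int max_priority)
{
  if (policy == 1)
    return max_priority;	/* restriction outranks the critical path */
  if (policy == 2)
    return priority + 1;	/* critical path outranks the restriction */
  return priority;
}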
@@ -14292,7 +16803,7 @@ rs6000_issue_rate (void)
case CPU_PPC750:
case CPU_PPC7400:
case CPU_PPC8540:
- return 2;
+ return 2;
case CPU_RIOS2:
case CPU_PPC604:
case CPU_PPC604E:
@@ -14346,7 +16857,7 @@ is_mem_ref (rtx pat)
}
/* Determine if PAT is a PATTERN of a load insn. */
-
+
static bool
is_load_insn1 (rtx pat)
{
@@ -14420,34 +16931,36 @@ is_store_insn (rtx insn)
costly by the given target. */
static bool
-rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distance)
-{
- /* If the flag is not enbled - no dependence is considered costly;
- allow all dependent insns in the same group.
+rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost,
+ int distance)
+{
+  /* If the flag is not enabled, no dependence is considered costly;
+ allow all dependent insns in the same group.
This is the most aggressive option. */
if (rs6000_sched_costly_dep == no_dep_costly)
return false;
- /* If the flag is set to 1 - a dependence is always considered costly;
+  /* If the flag is set to 1, a dependence is always considered costly;
do not allow dependent instructions in the same group.
This is the most conservative option. */
if (rs6000_sched_costly_dep == all_deps_costly)
- return true;
+ return true;
- if (rs6000_sched_costly_dep == store_to_load_dep_costly
- && is_load_insn (next)
+ if (rs6000_sched_costly_dep == store_to_load_dep_costly
+ && is_load_insn (next)
&& is_store_insn (insn))
/* Prevent load after store in the same group. */
return true;
if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
- && is_load_insn (next)
+ && is_load_insn (next)
&& is_store_insn (insn)
&& (!link || (int) REG_NOTE_KIND (link) == 0))
- /* Prevent load after store in the same group if it is a true dependence. */
+ /* Prevent load after store in the same group if it is a true
+ dependence. */
return true;
-
- /* The flag is set to X; dependences with latency >= X are considered costly,
+
+ /* The flag is set to X; dependences with latency >= X are considered costly,
and will not be scheduled in the same group. */
if (rs6000_sched_costly_dep <= max_dep_latency
&& ((cost - distance) >= (int)rs6000_sched_costly_dep))
@@ -14456,33 +16969,31 @@ rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distanc
return false;
}
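/* The same decision condensed into a model; the POLICY encoding here is
   hypothetical (0 = no_dep_costly, 1 = all_deps_costly, 2/3 = the two
   store-to-load variants, larger values = a latency threshold).  */
static int
model_costly_dependence (int policy, int store_then_load, int true_dep,
			 int cost, int distance)
{
  if (policy == 0)
    return 0;				/* nothing is costly */
  if (policy == 1)
    return 1;				/* everything is costly */
  if (policy == 2)
    return store_then_load;		/* any store->load pair */
  if (policy == 3)
    return store_then_load && true_dep;	/* true store->load deps only */
  return cost - distance >= policy;	/* latency >= threshold */
}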
-/* Return the next insn after INSN that is found before TAIL is reached,
+/* Return the next insn after INSN that is found before TAIL is reached,
skipping any "non-active" insns - insns that will not actually occupy
an issue slot. Return NULL_RTX if such an insn is not found. */
static rtx
get_next_active_insn (rtx insn, rtx tail)
{
- rtx next_insn;
-
- if (!insn || insn == tail)
+ if (insn == NULL_RTX || insn == tail)
return NULL_RTX;
- next_insn = NEXT_INSN (insn);
-
- while (next_insn
- && next_insn != tail
- && (GET_CODE(next_insn) == NOTE
- || GET_CODE (PATTERN (next_insn)) == USE
- || GET_CODE (PATTERN (next_insn)) == CLOBBER))
+ while (1)
{
- next_insn = NEXT_INSN (next_insn);
- }
-
- if (!next_insn || next_insn == tail)
- return NULL_RTX;
+ insn = NEXT_INSN (insn);
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
- return next_insn;
+ if (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && INSN_CODE (insn) != CODE_FOR_stack_tie))
+ break;
+ }
+ return insn;
}
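/* Intended use, mirroring redefine_groups and pad_groups below
   (a fragment; HEAD and TAIL come from the scheduler):

     for (insn = get_next_active_insn (head, tail);
	  insn != NULL_RTX;
	  insn = get_next_active_insn (insn, tail))
       ... account INSN against the current dispatch group ...  */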
/* Return whether the presence of INSN causes a dispatch group termination
@@ -14514,13 +17025,13 @@ insn_terminates_group_p (rtx insn, enum group_termination which_group)
if (which_group == current_group)
{
if (is_branch_slot_insn (insn))
- return true;
+ return true;
return false;
}
else if (which_group == previous_group)
{
if (is_dispatch_slot_restricted (insn))
- return true;
+ return true;
return false;
}
@@ -14542,23 +17053,23 @@ is_costly_group (rtx *group_insns, rtx next_insn)
{
rtx insn = group_insns[i];
if (!insn)
- continue;
+ continue;
for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
- {
- rtx next = XEXP (link, 0);
- if (next == next_insn)
- {
- cost = insn_cost (insn, link, next_insn);
- if (rs6000_is_costly_dependence (insn, next_insn, link, cost, 0))
- return true;
- }
- }
+ {
+ rtx next = XEXP (link, 0);
+ if (next == next_insn)
+ {
+ cost = insn_cost (insn, link, next_insn);
+ if (rs6000_is_costly_dependence (insn, next_insn, link, cost, 0))
+ return true;
+ }
+ }
}
return false;
}
-/* Utility of the function redefine_groups.
+/* Utility of the function redefine_groups.
Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
in the same dispatch group. If so, insert nops before NEXT_INSN, in order
to keep it "far" (in a separate group) from GROUP_INSNS, following
@@ -14566,14 +17077,15 @@ is_costly_group (rtx *group_insns, rtx next_insn)
-minsert_sched_nops = X:
(1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
in order to force NEXT_INSN into a separate group.
- (2) X < sched_finish_regroup_exact: insert exactly X nops.
- GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
+ (2) X < sched_finish_regroup_exact: insert exactly X nops.
+ GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
insertion (has a group just ended, how many vacant issue slots remain in the
last group, and how many dispatch groups were encountered so far). */
-static int
-force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
- bool *group_end, int can_issue_more, int *group_count)
+static int
+force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
+ rtx next_insn, bool *group_end, int can_issue_more,
+ int *group_count)
{
rtx nop;
bool force;
@@ -14593,95 +17105,96 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
if (sched_verbose > 6)
fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
- *group_count ,can_issue_more);
+	     *group_count, can_issue_more);
if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
{
if (*group_end)
- can_issue_more = 0;
+ can_issue_more = 0;
/* Since only a branch can be issued in the last issue_slot, it is
sufficient to insert 'can_issue_more - 1' nops if next_insn is not
a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
- in this case the last nop will start a new group and the branch will be
- forced to the new group. */
+ in this case the last nop will start a new group and the branch
+ will be forced to the new group. */
if (can_issue_more && !is_branch_slot_insn (next_insn))
- can_issue_more--;
+ can_issue_more--;
while (can_issue_more > 0)
- {
- nop = gen_nop();
- emit_insn_before (nop, next_insn);
- can_issue_more--;
- }
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
*group_end = true;
return 0;
- }
+ }
if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
{
int n_nops = rs6000_sched_insert_nops;
- /* Nops can't be issued from the branch slot, so the effective
- issue_rate for nops is 'issue_rate - 1'. */
+ /* Nops can't be issued from the branch slot, so the effective
+ issue_rate for nops is 'issue_rate - 1'. */
if (can_issue_more == 0)
- can_issue_more = issue_rate;
+ can_issue_more = issue_rate;
can_issue_more--;
if (can_issue_more == 0)
- {
- can_issue_more = issue_rate - 1;
- (*group_count)++;
- end = true;
- for (i = 0; i < issue_rate; i++)
- {
- group_insns[i] = 0;
- }
- }
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
while (n_nops > 0)
- {
- nop = gen_nop ();
- emit_insn_before (nop, next_insn);
- if (can_issue_more == issue_rate - 1) /* new group begins */
- end = false;
- can_issue_more--;
- if (can_issue_more == 0)
- {
- can_issue_more = issue_rate - 1;
- (*group_count)++;
- end = true;
- for (i = 0; i < issue_rate; i++)
- {
- group_insns[i] = 0;
- }
- }
- n_nops--;
- }
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ if (can_issue_more == issue_rate - 1) /* new group begins */
+ end = false;
+ can_issue_more--;
+ if (can_issue_more == 0)
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+ n_nops--;
+ }
/* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
- can_issue_more++;
+ can_issue_more++;
- *group_end = /* Is next_insn going to start a new group? */
- (end
+ /* Is next_insn going to start a new group? */
+ *group_end
+ = (end
|| (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
|| (can_issue_more <= 2 && is_cracked_insn (next_insn))
|| (can_issue_more < issue_rate &&
- insn_terminates_group_p (next_insn, previous_group)));
+ insn_terminates_group_p (next_insn, previous_group)));
if (*group_end && end)
- (*group_count)--;
+ (*group_count)--;
if (sched_verbose > 6)
- fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
- *group_count, can_issue_more);
- return can_issue_more;
- }
+ fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
+ *group_count, can_issue_more);
+ return can_issue_more;
+ }
return can_issue_more;
}
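/* The nop budget in the sched_finish_regroup_exact case, as a one-line
   model (name illustrative): a branch may still take the branch-only
   last slot, so a non-branch NEXT_INSN needs one nop fewer.  */
static int
model_exact_nops (int can_issue_more, int next_is_branch)
{
  return (can_issue_more && !next_is_branch
	  ? can_issue_more - 1 : can_issue_more);
}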
/* This function tries to synch the dispatch groups that the compiler "sees"
- with the dispatch groups that the processor dispatcher is expected to
+ with the dispatch groups that the processor dispatcher is expected to
form in practice. It tries to achieve this synchronization by forcing the
estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the processor).
@@ -14696,7 +17209,7 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
between the insns.
The function estimates the group boundaries that the processor will form as
- folllows: It keeps track of how many vacant issue slots are available after
+ follows: It keeps track of how many vacant issue slots are available after
each insn. A subsequent insn will start a new group if one of the following
4 cases applies:
- no more vacant issue slots remain in the current dispatch group.
@@ -14705,7 +17218,7 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
   - only the last 2 or fewer issue slots, including the branch slot, are vacant,
which means that a cracked insn (which occupies two issue slots) can't be
issued in this group.
- - less than 'issue_rate' slots are vacant, and the next insn always needs to
+ - less than 'issue_rate' slots are vacant, and the next insn always needs to
start a new group. */
static int
@@ -14722,7 +17235,7 @@ redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
/* Initialize. */
issue_rate = rs6000_issue_rate ();
group_insns = alloca (issue_rate * sizeof (rtx));
- for (i = 0; i < issue_rate; i++)
+ for (i = 0; i < issue_rate; i++)
{
group_insns[i] = 0;
}
@@ -14736,43 +17249,45 @@ redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
slot = (issue_rate - can_issue_more);
group_insns[slot] = insn;
can_issue_more =
- rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
+ rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
if (insn_terminates_group_p (insn, current_group))
- can_issue_more = 0;
+ can_issue_more = 0;
next_insn = get_next_active_insn (insn, tail);
if (next_insn == NULL_RTX)
- return group_count + 1;
+ return group_count + 1;
- group_end = /* Is next_insn going to start a new group? */
- (can_issue_more == 0
- || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
- || (can_issue_more <= 2 && is_cracked_insn (next_insn))
- || (can_issue_more < issue_rate &&
- insn_terminates_group_p (next_insn, previous_group)));
+ /* Is next_insn going to start a new group? */
+ group_end
+ = (can_issue_more == 0
+ || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
+ || (can_issue_more <= 2 && is_cracked_insn (next_insn))
+ || (can_issue_more < issue_rate &&
+ insn_terminates_group_p (next_insn, previous_group)));
- can_issue_more = force_new_group (sched_verbose, dump, group_insns,
- next_insn, &group_end, can_issue_more, &group_count);
+ can_issue_more = force_new_group (sched_verbose, dump, group_insns,
+ next_insn, &group_end, can_issue_more,
+ &group_count);
if (group_end)
- {
- group_count++;
- can_issue_more = 0;
- for (i = 0; i < issue_rate; i++)
- {
- group_insns[i] = 0;
- }
- }
+ {
+ group_count++;
+ can_issue_more = 0;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
if (GET_MODE (next_insn) == TImode && can_issue_more)
- PUT_MODE(next_insn, VOIDmode);
+ PUT_MODE (next_insn, VOIDmode);
else if (!can_issue_more && GET_MODE (next_insn) != TImode)
- PUT_MODE (next_insn, TImode);
+ PUT_MODE (next_insn, TImode);
insn = next_insn;
if (can_issue_more == 0)
- can_issue_more = issue_rate;
- } /* while */
+ can_issue_more = issue_rate;
+ } /* while */
return group_count;
}
@@ -14808,33 +17323,33 @@ pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
if (next_insn == NULL_RTX)
- break;
+ break;
if (group_end)
- {
- /* If the scheduler had marked group termination at this location
- (between insn and next_indn), and neither insn nor next_insn will
- force group termination, pad the group with nops to force group
- termination. */
- if (can_issue_more
- && (rs6000_sched_insert_nops == sched_finish_pad_groups)
- && !insn_terminates_group_p (insn, current_group)
- && !insn_terminates_group_p (next_insn, previous_group))
- {
- if (!is_branch_slot_insn(next_insn))
- can_issue_more--;
-
- while (can_issue_more)
- {
- nop = gen_nop ();
- emit_insn_before (nop, next_insn);
- can_issue_more--;
- }
- }
-
- can_issue_more = issue_rate;
- group_count++;
- }
+ {
+ /* If the scheduler had marked group termination at this location
+	     (between insn and next_insn), and neither insn nor next_insn will
+ force group termination, pad the group with nops to force group
+ termination. */
+ if (can_issue_more
+ && (rs6000_sched_insert_nops == sched_finish_pad_groups)
+ && !insn_terminates_group_p (insn, current_group)
+ && !insn_terminates_group_p (next_insn, previous_group))
+ {
+ if (!is_branch_slot_insn (next_insn))
+ can_issue_more--;
+
+ while (can_issue_more)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
+ }
+
+ can_issue_more = issue_rate;
+ group_count++;
+ }
insn = next_insn;
next_insn = get_next_active_insn (insn, tail);
@@ -14857,16 +17372,16 @@ rs6000_sched_finish (FILE *dump, int sched_verbose)
if (reload_completed && rs6000_sched_groups)
{
if (rs6000_sched_insert_nops == sched_finish_none)
- return;
+ return;
if (rs6000_sched_insert_nops == sched_finish_pad_groups)
- n_groups = pad_groups (dump, sched_verbose,
- current_sched_info->prev_head,
- current_sched_info->next_tail);
+ n_groups = pad_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
else
- n_groups = redefine_groups (dump, sched_verbose,
- current_sched_info->prev_head,
- current_sched_info->next_tail);
+ n_groups = redefine_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
if (sched_verbose >= 6)
{
@@ -14887,7 +17402,7 @@ rs6000_trampoline_size (void)
switch (DEFAULT_ABI)
{
default:
- abort ();
+ gcc_unreachable ();
case ABI_AIX:
ret = (TARGET_32BIT) ? 12 : 24;
@@ -14909,25 +17424,24 @@ rs6000_trampoline_size (void)
void
rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
{
- enum machine_mode pmode = Pmode;
int regsize = (TARGET_32BIT) ? 4 : 8;
- rtx ctx_reg = force_reg (pmode, cxt);
+ rtx ctx_reg = force_reg (Pmode, cxt);
switch (DEFAULT_ABI)
{
default:
- abort ();
+ gcc_unreachable ();
/* Macros to shorten the code expansions below. */
-#define MEM_DEREF(addr) gen_rtx_MEM (pmode, memory_address (pmode, addr))
+#define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
#define MEM_PLUS(addr,offset) \
- gen_rtx_MEM (pmode, memory_address (pmode, plus_constant (addr, offset)))
+ gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))
/* Under AIX, just build the 3 word function descriptor */
case ABI_AIX:
{
- rtx fn_reg = gen_reg_rtx (pmode);
- rtx toc_reg = gen_reg_rtx (pmode);
+ rtx fn_reg = gen_reg_rtx (Pmode);
+ rtx toc_reg = gen_reg_rtx (Pmode);
emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
emit_move_insn (MEM_DEREF (addr), fn_reg);
@@ -14939,12 +17453,12 @@ rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
/* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
case ABI_DARWIN:
case ABI_V4:
- emit_library_call (gen_rtx_SYMBOL_REF (SImode, "__trampoline_setup"),
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
FALSE, VOIDmode, 4,
- addr, pmode,
+ addr, Pmode,
GEN_INT (rs6000_trampoline_size ()), SImode,
- fnaddr, pmode,
- ctx_reg, pmode);
+ fnaddr, Pmode,
+ ctx_reg, Pmode);
break;
}
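/* The three words written for ABI_AIX, viewed as a C struct (a sketch;
   each field is regsize bytes, i.e. 4 or 8 depending on TARGET_32BIT).  */
struct aix_function_descriptor
{
  void *entry;		/* code address, copied from word 0 of FNADDR */
  void *toc;		/* TOC pointer, copied from word 1 of FNADDR */
  void *static_chain;	/* CXT, stored last by this routine */
};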
@@ -14960,21 +17474,28 @@ const struct attribute_spec rs6000_attribute_table[] =
{ "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
{ "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
{ "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
+ { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+ { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE,
+#endif
{ NULL, 0, 0, false, false, false, NULL }
};
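/* Concretely, the AltiVec keywords expand to this attribute; e.g.
   (typedef names are illustrative):  */
typedef __attribute__ ((altivec (vector__))) int v_int;	   /* vector int */
typedef __attribute__ ((altivec (pixel__))) unsigned short v_pix; /* vector pixel */
typedef __attribute__ ((altivec (bool__))) unsigned int v_bool;   /* vector bool int */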
/* Handle the "altivec" attribute. The attribute may have
arguments as follows:
- __attribute__((altivec(vector__)))
- __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
- __attribute__((altivec(bool__))) (always followed by 'unsigned')
+ __attribute__((altivec(vector__)))
+ __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
+ __attribute__((altivec(bool__))) (always followed by 'unsigned')
and may appear more than once (e.g., 'vector bool char') in a
given declaration. */
static tree
-rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
+rs6000_handle_altivec_attribute (tree *node,
+ tree name ATTRIBUTE_UNUSED,
+ tree args,
int flags ATTRIBUTE_UNUSED,
bool *no_add_attrs)
{
@@ -14983,7 +17504,7 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
int unsigned_p;
char altivec_type
= ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
- && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
? *IDENTIFIER_POINTER (TREE_VALUE (args))
: '?');
@@ -14995,47 +17516,66 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
mode = TYPE_MODE (type);
- if (rs6000_warn_altivec_long
- && (type == long_unsigned_type_node || type == long_integer_type_node))
- warning ("use of 'long' in AltiVec types is deprecated; use 'int'");
+ /* Check for invalid AltiVec type qualifiers. */
+ if (type == long_unsigned_type_node || type == long_integer_type_node)
+ {
+ if (TARGET_64BIT)
+ error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
+ else if (rs6000_warn_altivec_long)
+ warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
+ }
+ else if (type == long_long_unsigned_type_node
+ || type == long_long_integer_type_node)
+ error ("use of %<long long%> in AltiVec types is invalid");
+ else if (type == double_type_node)
+ error ("use of %<double%> in AltiVec types is invalid");
+ else if (type == long_double_type_node)
+ error ("use of %<long double%> in AltiVec types is invalid");
+ else if (type == boolean_type_node)
+ error ("use of boolean types in AltiVec types is invalid");
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ error ("use of %<complex%> in AltiVec types is invalid");
+ else if (DECIMAL_FLOAT_MODE_P (mode))
+ error ("use of decimal floating point types in AltiVec types is invalid");
switch (altivec_type)
{
case 'v':
- unsigned_p = TREE_UNSIGNED (type);
+ unsigned_p = TYPE_UNSIGNED (type);
switch (mode)
{
- case SImode:
- result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
- break;
- case HImode:
- result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
- break;
- case QImode:
- result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
- break;
- case SFmode: result = V4SF_type_node; break;
- /* If the user says 'vector int bool', we may be handed the 'bool'
- attribute _before_ the 'vector' attribute, and so select the proper
- type in the 'b' case below. */
- case V4SImode: case V8HImode: case V16QImode: result = type;
- default: break;
+ case SImode:
+ result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
+ break;
+ case HImode:
+ result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
+ break;
+ case QImode:
+ result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ break;
+ case SFmode: result = V4SF_type_node; break;
+ /* If the user says 'vector int bool', we may be handed the 'bool'
+ attribute _before_ the 'vector' attribute, and so select the
+ proper type in the 'b' case below. */
+ case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
+ result = type;
+ default: break;
}
break;
case 'b':
switch (mode)
{
- case SImode: case V4SImode: result = bool_V4SI_type_node; break;
- case HImode: case V8HImode: result = bool_V8HI_type_node; break;
- case QImode: case V16QImode: result = bool_V16QI_type_node;
- default: break;
+ case SImode: case V4SImode: result = bool_V4SI_type_node; break;
+ case HImode: case V8HImode: result = bool_V8HI_type_node; break;
+ case QImode: case V16QImode: result = bool_V16QI_type_node;
+ default: break;
}
break;
case 'p':
switch (mode)
{
- case V8HImode: result = pixel_V8HI_type_node;
- default: break;
+ case V8HImode: result = pixel_V8HI_type_node;
+ default: break;
}
default: break;
}
@@ -15045,9 +17585,7 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
*no_add_attrs = true; /* No need to hang on to the attribute. */
- if (!result)
- warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
- else
+ if (result)
*node = reconstruct_complex_type (*node, result);
return NULL_TREE;
@@ -15064,6 +17602,14 @@ rs6000_mangle_fundamental_type (tree type)
if (type == pixel_type_node) return "u7__pixel";
if (type == bool_int_type_node) return "U6__booli";
+ /* Mangle IBM extended float long double as `g' (__float128) on
+ powerpc*-linux where long-double-64 previously was the default. */
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_ELF
+ && TARGET_LONG_DOUBLE_128
+ && !TARGET_IEEEQUAD)
+ return "g";
+
/* For all other types, use normal C++ mangling. */
return NULL;
}
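/* Effect on C++ mangling under those conditions (a sketch):

     void f (long double);

   mangles as _Z1fg ('g', i.e. __float128) instead of the default
   _Z1fe ('e', long double).  */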
@@ -15072,16 +17618,16 @@ rs6000_mangle_fundamental_type (tree type)
struct attribute_spec.handler. */
static tree
-rs6000_handle_longcall_attribute (tree *node, tree name,
- tree args ATTRIBUTE_UNUSED,
- int flags ATTRIBUTE_UNUSED,
+rs6000_handle_longcall_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_TYPE
&& TREE_CODE (*node) != FIELD_DECL
&& TREE_CODE (*node) != TYPE_DECL)
{
- warning ("`%s' attribute only applies to functions",
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
@@ -15100,12 +17646,16 @@ rs6000_set_default_type_attributes (tree type)
TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
NULL_TREE,
TYPE_ATTRIBUTES (type));
+
+#if TARGET_MACHO
+ darwin_set_default_type_attributes (type);
+#endif
}
/* Return a reference suitable for calling a function with the
longcall attribute. */
-struct rtx_def *
+rtx
rs6000_longcall_ref (rtx call_ref)
{
const char *call_name;
@@ -15128,59 +17678,119 @@ rs6000_longcall_ref (rtx call_ref)
return force_reg (Pmode, call_ref);
}
-#ifdef USING_ELFOS_H
+#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
+#define TARGET_USE_MS_BITFIELD_LAYOUT 0
+#endif
-/* A C statement or statements to switch to the appropriate section
- for output of RTX in mode MODE. You can assume that RTX is some
- kind of constant in RTL. The argument MODE is redundant except in
- the case of a `const_int' rtx. Select the section by calling
- `text_section' or one of the alternatives for other sections.
+/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+rs6000_handle_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored", IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
- Do not define this macro if you put all constants in the read-only
- data section. */
+ else if ((is_attribute_p ("ms_struct", name)
+ && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
+ || ((is_attribute_p ("gcc_struct", name)
+ && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
+ {
+ warning (OPT_Wattributes, "%qs incompatible attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static bool
+rs6000_ms_bitfield_layout_p (tree record_type)
+{
+ return (TARGET_USE_MS_BITFIELD_LAYOUT &&
+ !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
+ || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
+}
+
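/* Usage accepted by the handler above (a sketch):  */
struct __attribute__ ((ms_struct)) s1 { char c; int i : 4; };	/* MS layout */
struct __attribute__ ((gcc_struct)) s2 { char c; int i : 4; };	/* GCC layout */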
+#ifdef USING_ELFOS_H
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
static void
-rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
- unsigned HOST_WIDE_INT align)
+rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
- if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
- toc_section ();
+ if (DEFAULT_ABI == ABI_AIX
+ && TARGET_MINIMAL_TOC
+ && !TARGET_RELOCATABLE)
+ {
+ if (!toc_initialized)
+ {
+ toc_initialized = 1;
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
+ fprintf (asm_out_file, "\t.tc ");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, "\n");
+
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ }
+ else
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ }
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
else
- default_elf_select_rtx_section (mode, x, align);
+ {
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ if (!toc_initialized)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ toc_initialized = 1;
+ }
+ }
}
-/* A C statement or statements to switch to the appropriate
- section for output of DECL. DECL is either a `VAR_DECL' node
- or a constant of some sort. RELOC indicates whether forming
- the initial value of DECL requires link-time relocations. */
+/* Implement TARGET_ASM_INIT_SECTIONS. */
static void
-rs6000_elf_select_section (tree decl, int reloc,
- unsigned HOST_WIDE_INT align)
+rs6000_elf_asm_init_sections (void)
{
- /* Pretend that we're always building for a shared library when
- ABI_AIX, because otherwise we end up with dynamic relocations
- in read-only sections. This happens for function pointers,
- references to vtables in typeinfo, and probably other cases. */
- default_elf_select_section_1 (decl, reloc, align,
- flag_pic || DEFAULT_ABI == ABI_AIX);
+ toc_section
+ = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
+
+ sdata2_section
+ = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+ SDATA2_SECTION_ASM_OP);
}
-/* A C statement to build up a unique section name, expressed as a
- STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
- RELOC indicates whether the initial value of EXP requires
- link-time relocations. If you do not define this macro, GCC will use
- the symbol name prefixed by `.' as the section name. Note - this
- macro can now be called for uninitialized data items as well as
- initialized data and functions. */
+/* Implement TARGET_SELECT_RTX_SECTION. */
-static void
-rs6000_elf_unique_section (tree decl, int reloc)
+static section *
+rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
{
- /* As above, pretend that we're always building for a shared library
- when ABI_AIX, to avoid dynamic relocations in read-only sections. */
- default_unique_section_1 (decl, reloc,
- flag_pic || DEFAULT_ABI == ABI_AIX);
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ return toc_section;
+ else
+ return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
@@ -15210,7 +17820,7 @@ rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
}
}
-static bool
+bool
rs6000_elf_in_small_data_p (tree decl)
{
if (rs6000_sdata == SDATA_NONE)
@@ -15224,10 +17834,6 @@ rs6000_elf_in_small_data_p (tree decl)
if (TREE_CODE (decl) == FUNCTION_DECL)
return false;
- /* Thread-local vars can't go in the small data area. */
- if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl))
- return false;
-
if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
{
const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
@@ -15255,7 +17861,14 @@ rs6000_elf_in_small_data_p (tree decl)
}
#endif /* USING_ELFOS_H */
+
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
+static bool
+rs6000_use_blocks_for_constant_p (enum machine_mode mode, rtx x)
+{
+ return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
+}
/* Return a REG that occurs in ADDR with coefficient 1.
ADDR can be effectively incremented by incrementing REG.
@@ -15264,7 +17877,7 @@ rs6000_elf_in_small_data_p (tree decl)
register by this routine since our caller will try to
increment the returned register via an "la" instruction. */
-struct rtx_def *
+rtx
find_addr_reg (rtx addr)
{
while (GET_CODE (addr) == PLUS)
@@ -15280,11 +17893,10 @@ find_addr_reg (rtx addr)
else if (CONSTANT_P (XEXP (addr, 1)))
addr = XEXP (addr, 0);
else
- abort ();
+ gcc_unreachable ();
}
- if (GET_CODE (addr) == REG && REGNO (addr) != 0)
- return addr;
- abort ();
+ gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
+ return addr;
}
void
@@ -15295,42 +17907,17 @@ rs6000_fatal_bad_address (rtx op)
#if TARGET_MACHO
-#if 0
-/* Returns 1 if OP is either a symbol reference or a sum of a symbol
- reference and a constant. */
-
-int
-symbolic_operand (rtx op)
-{
- switch (GET_CODE (op))
- {
- case SYMBOL_REF:
- case LABEL_REF:
- return 1;
- case CONST:
- op = XEXP (op, 0);
- return (GET_CODE (op) == SYMBOL_REF ||
- (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
- || GET_CODE (XEXP (op, 0)) == LABEL_REF)
- && GET_CODE (XEXP (op, 1)) == CONST_INT);
- default:
- return 0;
- }
-}
-#endif
-
-#if TARGET_MACHO
-
static tree branch_island_list = 0;
/* Remember to generate a branch island for far calls to the given
function. */
-static void
-add_compiler_branch_island (tree label_name, tree function_name, int line_number)
+static void
+add_compiler_branch_island (tree label_name, tree function_name,
+ int line_number)
{
tree branch_island = build_tree_list (function_name, label_name);
- TREE_TYPE (branch_island) = build_int_2 (line_number, 0);
+ TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
TREE_CHAIN (branch_island) = branch_island_list;
branch_island_list = branch_island;
}
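/* For reference, the PIC island that macho_branch_islands (below) emits
   for a far call has roughly this shape (a sketch; "L42" and "_target"
   are illustrative names):

	L42:
		mflr r0
		bcl 20,31,L42_pic
	L42_pic:
		mflr r11
		addis r11,r11,ha16(_target - L42_pic)
		mtlr r0
		addi r12,r11,lo16(_target - L42_pic)
		mtctr r12
		bctr  */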
@@ -15359,8 +17946,7 @@ macho_branch_islands (void)
const char *label =
IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
const char *name =
- darwin_strip_name_encoding (
- IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island)));
+ IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
char name_buf[512];
/* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
if (name[0] == '*' || name[0] == '&')
@@ -15374,8 +17960,7 @@ macho_branch_islands (void)
strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
- fprintf (asm_out_file, "\t.stabd 68,0," HOST_WIDE_INT_PRINT_UNSIGNED "\n",
- BRANCH_ISLAND_LINE_NUMBER(branch_island));
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
if (flag_pic)
{
@@ -15384,21 +17969,21 @@ macho_branch_islands (void)
strcat (tmp_buf, "_pic\n");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic:\n\tmflr r11\n");
-
+
strcat (tmp_buf, "\taddis r11,r11,ha16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic)\n");
-
+
strcat (tmp_buf, "\tmtlr r0\n");
-
+
strcat (tmp_buf, "\taddi r12,r11,lo16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic)\n");
-
+
strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
}
else
@@ -15412,8 +17997,7 @@ macho_branch_islands (void)
output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
- fprintf(asm_out_file, "\t.stabd 68,0," HOST_WIDE_INT_PRINT_UNSIGNED "\n",
- BRANCH_ISLAND_LINE_NUMBER (branch_island));
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
}
@@ -15450,21 +18034,31 @@ get_prev_label (tree function_name)
return 0;
}
+#ifndef DARWIN_LINKER_GENERATES_ISLANDS
+#define DARWIN_LINKER_GENERATES_ISLANDS 0
+#endif
+
+/* KEXTs still need branch islands. */
+#define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
+ || flag_mkernel || flag_apple_kext)
+
/* INSN is either a function call or a millicode call. It may have an
- unconditional jump in its delay slot.
+ unconditional jump in its delay slot.
CALL_DEST is the routine we are calling. */
char *
-output_call (rtx insn, rtx *operands, int dest_operand_number, int cookie_operand_number)
+output_call (rtx insn, rtx *operands, int dest_operand_number,
+ int cookie_operand_number)
{
static char buf[256];
- if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
+ if (DARWIN_GENERATE_ISLANDS
+ && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
&& (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
{
tree labelname;
tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
-
+
if (no_previous_def (funname))
{
int line_number = 0;
@@ -15495,8 +18089,6 @@ output_call (rtx insn, rtx *operands, int dest_operand_number, int cookie_operan
return buf;
}
-#endif /* TARGET_MACHO */
-
/* Generate PIC and indirect symbol stubs. */
void
@@ -15519,43 +18111,53 @@ machopic_output_stub (FILE *file, const char *symb, const char *stub)
GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
if (flag_pic == 2)
- machopic_picsymbol_stub1_section ();
+ switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
else
- machopic_symbol_stub1_section ();
- fprintf (file, "\t.align 2\n");
-
- fprintf (file, "%s:\n", stub);
- fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+ switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
if (flag_pic == 2)
{
+ fprintf (file, "\t.align 5\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
label++;
- local_label_0 = alloca (sizeof("\"L0000000000$spb\""));
+ local_label_0 = alloca (sizeof ("\"L00000000000$spb\""));
sprintf (local_label_0, "\"L%011d$spb\"", label);
-
+
fprintf (file, "\tmflr r0\n");
fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
lazy_ptr_name, local_label_0);
fprintf (file, "\tmtlr r0\n");
- fprintf (file, "\tlwzu r12,lo16(%s-%s)(r11)\n",
+ fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
lazy_ptr_name, local_label_0);
fprintf (file, "\tmtctr r12\n");
fprintf (file, "\tbctr\n");
}
else
- {
- fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
- fprintf (file, "\tlwzu r12,lo16(%s)(r11)\n", lazy_ptr_name);
- fprintf (file, "\tmtctr r12\n");
- fprintf (file, "\tbctr\n");
- }
-
- machopic_lazy_symbol_ptr_section ();
+ {
+ fprintf (file, "\t.align 4\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
+ fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
+ lazy_ptr_name);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
+
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
fprintf (file, "%s:\n", lazy_ptr_name);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
- fprintf (file, "\t.long dyld_stub_binding_helper\n");
+ fprintf (file, "%sdyld_stub_binding_helper\n",
+ (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses. If the address is already
@@ -15563,10 +18165,10 @@ machopic_output_stub (FILE *file, const char *symb, const char *stub)
position-independent addresses go into a reg. This is REG if non
zero, otherwise we allocate register(s) as necessary. */
-#define SMALL_INT(X) ((unsigned) (INTVAL(X) + 0x8000) < 0x10000)
+#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
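/* The bias trick accepts exactly the signed 16-bit range:

     INTVAL = -0x8000  ->  0x0000   (accepted)
     INTVAL =  0x7fff  ->  0xffff   (accepted)
     INTVAL =  0x8000  -> 0x10000   (rejected)  */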
rtx
-rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
+rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
rtx reg)
{
rtx base, offset;
@@ -15576,25 +18178,22 @@ rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
if (GET_CODE (orig) == CONST)
{
+ rtx reg_temp;
+
if (GET_CODE (XEXP (orig, 0)) == PLUS
&& XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
return orig;
- if (GET_CODE (XEXP (orig, 0)) == PLUS)
- {
- /* Use a different reg for the intermediate value, as
- it will be marked UNCHANGING. */
- rtx reg_temp = no_new_pseudos ? reg : gen_reg_rtx (Pmode);
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
- base =
- rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
- Pmode, reg_temp);
- offset =
- rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
- Pmode, reg);
- }
- else
- abort ();
+ /* Use a different reg for the intermediate value, as
+ it will be marked UNCHANGING. */
+ reg_temp = no_new_pseudos ? reg : gen_reg_rtx (Pmode);
+ base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
+ Pmode, reg_temp);
+ offset =
+ rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
+ Pmode, reg);
if (GET_CODE (offset) == CONST_INT)
{
@@ -15608,31 +18207,78 @@ rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
return machopic_legitimize_pic_address (mem, Pmode, reg);
}
}
- return gen_rtx (PLUS, Pmode, base, offset);
+ return gen_rtx_PLUS (Pmode, base, offset);
}
/* Fall back on generic machopic code. */
return machopic_legitimize_pic_address (orig, mode, reg);
}
-/* This is just a placeholder to make linking work without having to
- add this to the generic Darwin EXTRA_SECTIONS. If -mcall-aix is
- ever needed for Darwin (not too likely!) this would have to get a
- real definition. */
+/* Output a .machine directive for the Darwin assembler, and call
+ the generic start_file routine. */
-void
-toc_section (void)
+static void
+rs6000_darwin_file_start (void)
{
+ static const struct
+ {
+ const char *arg;
+ const char *name;
+ int if_set;
+ } mapping[] = {
+ { "ppc64", "ppc64", MASK_64BIT },
+ { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
+ { "power4", "ppc970", 0 },
+ { "G5", "ppc970", 0 },
+ { "7450", "ppc7450", 0 },
+ { "7400", "ppc7400", MASK_ALTIVEC },
+ { "G4", "ppc7400", 0 },
+ { "750", "ppc750", 0 },
+ { "740", "ppc750", 0 },
+ { "G3", "ppc750", 0 },
+ { "604e", "ppc604e", 0 },
+ { "604", "ppc604", 0 },
+ { "603e", "ppc603", 0 },
+ { "603", "ppc603", 0 },
+ { "601", "ppc601", 0 },
+ { NULL, "ppc", 0 } };
+ const char *cpu_id = "";
+ size_t i;
+
+ rs6000_file_start ();
+ darwin_file_start ();
+
+ /* Determine the argument to -mcpu=. Default to G3 if not specified. */
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ if (rs6000_select[i].set_arch_p && rs6000_select[i].string
+ && rs6000_select[i].string[0] != '\0')
+ cpu_id = rs6000_select[i].string;
+
+ /* Look through the mapping array. Pick the first name that either
+ matches the argument, has a bit set in IF_SET that is also set
+ in the target flags, or has a NULL name. */
+
+ i = 0;
+ while (mapping[i].arg != NULL
+ && strcmp (mapping[i].arg, cpu_id) != 0
+ && (mapping[i].if_set & target_flags) == 0)
+ i++;
+
+ fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}
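/* Resulting directive for a few -mcpu values (a sketch, assuming no
   extra target flags force an earlier table entry):

     -mcpu=G4   ->  .machine ppc7400
     -mcpu=970  ->  .machine ppc970
     (none)     ->  .machine ppc  */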
#endif /* TARGET_MACHO */
#if TARGET_ELF
-static unsigned int
-rs6000_elf_section_type_flags (tree decl, const char *name, int reloc)
+static int
+rs6000_elf_reloc_rw_mask (void)
{
- return default_section_type_flags_1 (decl, name, reloc,
- flag_pic || DEFAULT_ABI == ABI_AIX);
+ if (flag_pic)
+ return 3;
+ else if (DEFAULT_ABI == ABI_AIX)
+ return 2;
+ else
+ return 0;
}
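/* The mask is consumed by the generic section-selection code: bit 0
   covers relocations against local symbols, bit 1 those against global
   symbols.  So under -fpic any relocated datum needs a writable
   section (3); for ABI_AIX only data relocated against global symbols
   does (2); otherwise relocated data may stay read-only (0).  */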
/* Record an element in the table of global constructors. SYMBOL is
@@ -15651,14 +18297,14 @@ rs6000_elf_asm_out_constructor (rtx symbol, int priority)
if (priority != DEFAULT_INIT_PRIORITY)
{
sprintf (buf, ".ctors.%.5u",
- /* Invert the numbering so the linker puts us in the proper
- order; constructors are run from right to left, and the
- linker sorts in increasing order. */
- MAX_INIT_PRIORITY - priority);
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
section = buf;
}
- named_section_flags (section, SECTION_WRITE);
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
assemble_align (POINTER_SIZE);
if (TARGET_RELOCATABLE)
@@ -15680,14 +18326,14 @@ rs6000_elf_asm_out_destructor (rtx symbol, int priority)
if (priority != DEFAULT_INIT_PRIORITY)
{
sprintf (buf, ".dtors.%.5u",
- /* Invert the numbering so the linker puts us in the proper
- order; constructors are run from right to left, and the
- linker sorts in increasing order. */
- MAX_INIT_PRIORITY - priority);
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
section = buf;
}
- named_section_flags (section, SECTION_WRITE);
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
assemble_align (POINTER_SIZE);
if (TARGET_RELOCATABLE)
@@ -15708,26 +18354,32 @@ rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
ASM_OUTPUT_LABEL (file, name);
fputs (DOUBLE_INT_ASM_OP, file);
- putc ('.', file);
- assemble_name (file, name);
- fputs (",.TOC.@tocbase,0\n\t.previous\n\t.size\t", file);
- assemble_name (file, name);
- fputs (",24\n\t.type\t.", file);
- assemble_name (file, name);
- fputs (",@function\n", file);
- if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
- {
- fputs ("\t.globl\t.", file);
+ rs6000_output_function_entry (file, name);
+ fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
+ if (DOT_SYMBOLS)
+ {
+ fputs ("\t.size\t", file);
+ assemble_name (file, name);
+ fputs (",24\n\t.type\t.", file);
assemble_name (file, name);
- putc ('\n', file);
+ fputs (",@function\n", file);
+ if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
+ {
+ fputs ("\t.globl\t.", file);
+ assemble_name (file, name);
+ putc ('\n', file);
+ }
}
+ else
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
- putc ('.', file);
- ASM_OUTPUT_LABEL (file, name);
+ rs6000_output_function_entry (file, name);
+ fputs (":\n", file);
return;
}
if (TARGET_RELOCATABLE
+ && !TARGET_SECURE_PLT
&& (get_pool_size () != 0 || current_function_profile)
&& uses_TOC ())
{
@@ -15780,6 +18432,16 @@ rs6000_elf_end_indicate_exec_stack (void)
#if TARGET_XCOFF
static void
+rs6000_xcoff_asm_output_anchor (rtx symbol)
+{
+ char buffer[100];
+
+ sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
+ SYMBOL_REF_BLOCK_OFFSET (symbol));
+ ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
+}
+
+static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
fputs (GLOBAL_ASM_OP, stream);
@@ -15787,8 +18449,82 @@ rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
putc ('\n', stream);
}
+/* A get_unnamed_decl callback, used for read-only sections. PTR
+ points to the section string variable. */
+
+static void
+rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RO],3\n",
+ *(const char *const *) directive);
+}
+
+/* Likewise for read-write sections. */
+
+static void
+rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RW],3\n",
+ *(const char *const *) directive);
+}
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (TARGET_MINIMAL_TOC)
+ {
+ /* toc_section is always selected at least once from
+ rs6000_xcoff_file_start, so this is guaranteed to
+ always be defined once and only once in each file. */
+ if (!toc_initialized)
+ {
+ fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
+ fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
+ toc_initialized = 1;
+ }
+ fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
+ (TARGET_32BIT ? "" : ",3"));
+ }
+ else
+ fputs ("\t.toc\n", asm_out_file);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_xcoff_asm_init_sections (void)
+{
+ read_only_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_read_only_section_name);
+
+ private_data_section
+ = get_unnamed_section (SECTION_WRITE,
+ rs6000_xcoff_output_readwrite_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ read_only_private_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ toc_section
+ = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
+
+ readonly_data_section = read_only_data_section;
+ exception_section = data_section;
+}
+
+static int
+rs6000_xcoff_reloc_rw_mask (void)
+{
+ return 3;
+}
+
static void
-rs6000_xcoff_asm_named_section (const char *name, unsigned int flags)
+rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
{
int smclass;
static const char * const suffix[3] = { "PR", "RO", "RW" };
@@ -15805,23 +18541,23 @@ rs6000_xcoff_asm_named_section (const char *name, unsigned int flags)
name, suffix[smclass], flags & SECTION_ENTSIZE);
}
-static void
-rs6000_xcoff_select_section (tree decl, int reloc,
- unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+static section *
+rs6000_xcoff_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
- if (decl_readonly_section_1 (decl, reloc, 1))
+ if (decl_readonly_section (decl, reloc))
{
if (TREE_PUBLIC (decl))
- read_only_data_section ();
+ return read_only_data_section;
else
- read_only_private_data_section ();
+ return read_only_private_data_section;
}
else
{
if (TREE_PUBLIC (decl))
- data_section ();
+ return data_section;
else
- private_data_section ();
+ return private_data_section;
}
}
@@ -15850,14 +18586,14 @@ rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
However, if this is being placed in the TOC it must be output as a
toc entry. */
-static void
-rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
- unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+static section *
+rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
- toc_section ();
+ return toc_section;
else
- read_only_private_data_section ();
+ return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name. */
@@ -15881,7 +18617,7 @@ static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
unsigned int align;
- unsigned int flags = default_section_type_flags_1 (decl, name, reloc, 1);
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
/* Align to at least UNIT size. */
if (flags & SECTION_CODE)
@@ -15920,10 +18656,9 @@ rs6000_xcoff_file_start (void)
fputs ("\t.file\t", asm_out_file);
output_quoted_string (asm_out_file, main_input_filename);
fputc ('\n', asm_out_file);
- toc_section ();
if (write_symbols != NO_DEBUG)
- private_data_section ();
- text_section ();
+ switch_to_section (private_data_section);
+ switch_to_section (text_section);
if (profile_flag)
fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
rs6000_file_start ();
@@ -15935,256 +18670,390 @@ rs6000_xcoff_file_start (void)
static void
rs6000_xcoff_file_end (void)
{
- text_section ();
+ switch_to_section (text_section);
fputs ("_section_.text:\n", asm_out_file);
- data_section ();
+ switch_to_section (data_section);
fputs (TARGET_32BIT
? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
asm_out_file);
}
#endif /* TARGET_XCOFF */
-#if TARGET_MACHO
-/* Cross-module name binding. Darwin does not support overriding
- functions at dynamic-link time. */
-
-static bool
-rs6000_binds_local_p (tree decl)
-{
- return default_binds_local_p_1 (decl, 0);
-}
-#endif
-
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
scanned. In either case, *TOTAL contains the cost result. */
static bool
-rs6000_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
- int *total)
+rs6000_rtx_costs (rtx x, int code, int outer_code, int *total)
{
+ enum machine_mode mode = GET_MODE (x);
+
switch (code)
{
- /* On the RS/6000, if it is valid in the insn, it is free.
- So this always returns 0. */
+ /* On the RS/6000, if it is valid in the insn, it is free. */
case CONST_INT:
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
- case CONST_DOUBLE:
- case HIGH:
- *total = 0;
- return true;
-
- case PLUS:
- *total = ((GET_CODE (XEXP (x, 1)) == CONST_INT
- && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1))
- + 0x8000) >= 0x10000)
- && ((INTVAL (XEXP (x, 1)) & 0xffff) != 0))
- ? COSTS_N_INSNS (2)
- : COSTS_N_INSNS (1));
- return true;
-
- case AND:
- case IOR:
- case XOR:
- *total = ((GET_CODE (XEXP (x, 1)) == CONST_INT
- && (INTVAL (XEXP (x, 1)) & (~ (HOST_WIDE_INT) 0xffff)) != 0
- && ((INTVAL (XEXP (x, 1)) & 0xffff) != 0))
- ? COSTS_N_INSNS (2)
- : COSTS_N_INSNS (1));
- return true;
-
- case MULT:
- if (optimize_size)
+ if (((outer_code == SET
+ || outer_code == PLUS
+ || outer_code == MINUS)
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_L (x)))
+ || (outer_code == AND
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))
+ || mask_operand (x, mode)
+ || (mode == DImode
+ && mask64_operand (x, DImode))))
+ || ((outer_code == IOR || outer_code == XOR)
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || outer_code == ASHIFT
+ || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT
+ || outer_code == ROTATE
+ || outer_code == ROTATERT
+ || outer_code == ZERO_EXTRACT
+ || (outer_code == MULT
+ && satisfies_constraint_I (x))
+ || ((outer_code == DIV || outer_code == UDIV
+ || outer_code == MOD || outer_code == UMOD)
+ && exact_log2 (INTVAL (x)) >= 0)
+ || (outer_code == COMPARE
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)))
+ || (outer_code == EQ
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || (outer_code == GTU
+ && satisfies_constraint_I (x))
+ || (outer_code == LTU
+ && satisfies_constraint_P (x)))
{
- *total = COSTS_N_INSNS (2);
+ *total = 0;
return true;
}
- switch (rs6000_cpu)
+ else if ((outer_code == PLUS
+ && reg_or_add_cint_operand (x, VOIDmode))
+ || (outer_code == MINUS
+ && reg_or_sub_cint_operand (x, VOIDmode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && (INTVAL (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
{
- case PROCESSOR_RIOS1:
- case PROCESSOR_PPC405:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? COSTS_N_INSNS (5)
- : (INTVAL (XEXP (x, 1)) >= -256
- && INTVAL (XEXP (x, 1)) <= 255)
- ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
- return true;
-
- case PROCESSOR_PPC440:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? COSTS_N_INSNS (3)
- : COSTS_N_INSNS (2));
- return true;
-
- case PROCESSOR_RS64A:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (20) : COSTS_N_INSNS (34)
- : (INTVAL (XEXP (x, 1)) >= -256
- && INTVAL (XEXP (x, 1)) <= 255)
- ? COSTS_N_INSNS (8) : COSTS_N_INSNS (12));
- return true;
-
- case PROCESSOR_RIOS2:
- case PROCESSOR_MPCCORE:
- case PROCESSOR_PPC604e:
- *total = COSTS_N_INSNS (2);
- return true;
-
- case PROCESSOR_PPC601:
- *total = COSTS_N_INSNS (5);
+ *total = COSTS_N_INSNS (1);
return true;
+ }
+ /* FALLTHRU */
- case PROCESSOR_PPC603:
- case PROCESSOR_PPC7400:
- case PROCESSOR_PPC750:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? COSTS_N_INSNS (5)
- : (INTVAL (XEXP (x, 1)) >= -256
- && INTVAL (XEXP (x, 1)) <= 255)
- ? COSTS_N_INSNS (2) : COSTS_N_INSNS (3));
- return true;
+ case CONST_DOUBLE:
+ if (mode == DImode && code == CONST_DOUBLE)
+ {
+ if ((outer_code == IOR || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0
+ && (CONST_DOUBLE_LOW (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == AND && and64_2_operand (x, DImode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ }
+ /* FALLTHRU */
- case PROCESSOR_PPC7450:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? COSTS_N_INSNS (4)
- : COSTS_N_INSNS (3));
- return true;
+ case CONST:
+ case HIGH:
+ case SYMBOL_REF:
+ case MEM:
+ /* When optimizing for size, MEM should be slightly more expensive
+ than generating the address, e.g., (plus (reg) (const)).
+ L1 cache latency is about two instructions. */
+ *total = optimize_size ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
+ return true;
- case PROCESSOR_PPC403:
- case PROCESSOR_PPC604:
- case PROCESSOR_PPC8540:
- *total = COSTS_N_INSNS (4);
- return true;
+ case LABEL_REF:
+ *total = 0;
+ return true;
- case PROCESSOR_PPC620:
- case PROCESSOR_PPC630:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (5) : COSTS_N_INSNS (7)
- : (INTVAL (XEXP (x, 1)) >= -256
- && INTVAL (XEXP (x, 1)) <= 255)
- ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
- return true;
+ case PLUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = rs6000_cost->dmul - rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
- case PROCESSOR_POWER4:
- case PROCESSOR_POWER5:
- *total = (GET_CODE (XEXP (x, 1)) != CONST_INT
- ? GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4)
- : COSTS_N_INSNS (2));
- return true;
+ case MINUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = 0;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
- default:
- abort ();
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && satisfies_constraint_I (XEXP (x, 1)))
+ {
+ if (INTVAL (XEXP (x, 1)) >= -256
+ && INTVAL (XEXP (x, 1)) <= 255)
+ *total = rs6000_cost->mulsi_const9;
+ else
+ *total = rs6000_cost->mulsi_const;
}
+ /* FMA accounted in outer PLUS/MINUS. */
+ else if ((mode == DFmode || mode == SFmode)
+ && (outer_code == PLUS || outer_code == MINUS))
+ *total = 0;
+ else if (mode == DFmode)
+ *total = rs6000_cost->dmul;
+ else if (mode == SFmode)
+ *total = rs6000_cost->fp;
+ else if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return false;
case DIV:
case MOD:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
- && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ if (FLOAT_MODE_P (mode))
{
- *total = COSTS_N_INSNS (2);
- return true;
+ *total = mode == DFmode ? rs6000_cost->ddiv
+ : rs6000_cost->sdiv;
+ return false;
}
/* FALLTHRU */
case UDIV:
case UMOD:
- switch (rs6000_cpu)
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
{
- case PROCESSOR_RIOS1:
- *total = COSTS_N_INSNS (19);
- return true;
+ if (code == DIV || code == MOD)
+ /* Shift, addze */
+ *total = COSTS_N_INSNS (2);
+ else
+ /* Shift */
+ *total = COSTS_N_INSNS (1);
+ }
+ else
+ {
+ if (GET_MODE (XEXP (x, 1)) == DImode)
+ *total = rs6000_cost->divdi;
+ else
+ *total = rs6000_cost->divsi;
+ }
+ /* Add in shift and subtract for MOD. */
+ if (code == MOD || code == UMOD)
+ *total += COSTS_N_INSNS (2);
+ return false;
- case PROCESSOR_RIOS2:
- *total = COSTS_N_INSNS (13);
- return true;
+ case FFS:
+ *total = COSTS_N_INSNS (4);
+ return false;
- case PROCESSOR_RS64A:
- *total = (GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (65)
- : COSTS_N_INSNS (67));
- return true;
+ case NOT:
+ if (outer_code == AND || outer_code == IOR || outer_code == XOR)
+ {
+ *total = 0;
+ return false;
+ }
+ /* FALLTHRU */
- case PROCESSOR_MPCCORE:
- *total = COSTS_N_INSNS (6);
- return true;
+ case AND:
+ case IOR:
+ case XOR:
+ case ZERO_EXTRACT:
+ *total = COSTS_N_INSNS (1);
+ return false;
- case PROCESSOR_PPC403:
- *total = COSTS_N_INSNS (33);
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* Handle mul_highpart. */
+ if (outer_code == TRUNCATE
+ && GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
return true;
+ }
+ else if (outer_code == AND)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
- case PROCESSOR_PPC405:
- *total = COSTS_N_INSNS (35);
- return true;
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
- case PROCESSOR_PPC440:
- *total = COSTS_N_INSNS (34);
- return true;
+ case COMPARE:
+ case NEG:
+ case ABS:
+ if (!FLOAT_MODE_P (mode))
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
- case PROCESSOR_PPC601:
- *total = COSTS_N_INSNS (36);
- return true;
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case UNSIGNED_FIX:
+ case FLOAT_TRUNCATE:
+ *total = rs6000_cost->fp;
+ return false;
- case PROCESSOR_PPC603:
- *total = COSTS_N_INSNS (37);
- return true;
+ case FLOAT_EXTEND:
+ if (mode == DFmode)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ return false;
- case PROCESSOR_PPC604:
- case PROCESSOR_PPC604e:
- *total = COSTS_N_INSNS (20);
+ case UNSPEC:
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_FRSP:
+ *total = rs6000_cost->fp;
return true;
- case PROCESSOR_PPC620:
- case PROCESSOR_PPC630:
- *total = (GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (21)
- : COSTS_N_INSNS (37));
- return true;
+ default:
+ break;
+ }
+ break;
- case PROCESSOR_PPC750:
- case PROCESSOR_PPC8540:
- case PROCESSOR_PPC7400:
- *total = COSTS_N_INSNS (19);
+ case CALL:
+ case IF_THEN_ELSE:
+ if (optimize_size)
+ {
+ *total = COSTS_N_INSNS (1);
return true;
+ }
+ else if (FLOAT_MODE_P (mode)
+ && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ *total = rs6000_cost->fp;
+ return false;
+ }
+ break;
- case PROCESSOR_PPC7450:
- *total = COSTS_N_INSNS (23);
+ case EQ:
+ case GTU:
+ case LTU:
+ /* Carry bit requires mode == Pmode.
+ NEG or PLUS is already counted, so only add one. */
+ if (mode == Pmode
+ && (outer_code == NEG || outer_code == PLUS))
+ {
+ *total = COSTS_N_INSNS (1);
return true;
+ }
+ if (outer_code == SET)
+ {
+ if (XEXP (x, 1) == const0_rtx)
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ else if (mode == Pmode)
+ {
+ *total = COSTS_N_INSNS (3);
+ return false;
+ }
+ }
+ /* FALLTHRU */
- case PROCESSOR_POWER4:
- case PROCESSOR_POWER5:
- *total = (GET_MODE (XEXP (x, 1)) != DImode
- ? COSTS_N_INSNS (18)
- : COSTS_N_INSNS (34));
+ case GT:
+ case LT:
+ case UNORDERED:
+ if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
+ {
+ *total = COSTS_N_INSNS (2);
return true;
-
- default:
- abort ();
}
-
- case FFS:
- *total = COSTS_N_INSNS (4);
- return true;
-
- case MEM:
- /* MEM should be slightly more expensive than (plus (reg) (const)). */
- *total = 5;
- return true;
+ /* CC COMPARE. */
+ if (outer_code == COMPARE)
+ {
+ *total = 0;
+ return true;
+ }
+ break;
default:
- return false;
+ break;
}
+
+ return false;
}
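The CONST_INT arm above treats a constant as free when it already fits
the consuming insn.  A minimal sketch of the two constraint checks it
leans on most; fits_constraint_I/K are hypothetical helper names and
the ranges are the customary rs6000 meanings, stated here as an
assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'I' is a signed 16-bit immediate (addi-class insns), 'K' an
   unsigned 16-bit immediate (andi./ori-class insns).  A constant that
   satisfies the consuming insn's constraint is costed 0 because the
   insn encodes it directly.  */
static bool fits_constraint_I (int64_t v)  { return v >= -0x8000 && v <= 0x7fff; }
static bool fits_constraint_K (uint64_t v) { return v <= 0xffff; }

int main (void)
{
  printf ("%d %d\n", fits_constraint_I (-32768), fits_constraint_K (0x10000));
  return 0;
}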
/* A C expression returning the cost of moving data from a register of class
CLASS1 to one of CLASS2. */
int
-rs6000_register_move_cost (enum machine_mode mode,
+rs6000_register_move_cost (enum machine_mode mode,
enum reg_class from, enum reg_class to)
{
/* Moves from/to GENERAL_REGS. */
@@ -16198,22 +19067,23 @@ rs6000_register_move_cost (enum machine_mode mode,
return (rs6000_memory_move_cost (mode, from, 0)
+ rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
-/* It's more expensive to move CR_REGS than CR0_REGS because of the shift.... */
+ /* It's more expensive to move CR_REGS than CR0_REGS because of the
+ shift. */
else if (from == CR_REGS)
return 4;
else
-/* A move will cost one instruction per GPR moved. */
- return 2 * HARD_REGNO_NREGS (0, mode);
+ /* A move will cost one instruction per GPR moved. */
+ return 2 * hard_regno_nregs[0][mode];
}
-/* Moving between two similar registers is just one instruction. */
+ /* Moving between two similar registers is just one instruction. */
else if (reg_classes_intersect_p (to, from))
return mode == TFmode ? 4 : 2;
-/* Everything else has to go through GENERAL_REGS. */
+ /* Everything else has to go through GENERAL_REGS. */
else
- return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ rs6000_register_move_cost (mode, from, GENERAL_REGS));
}
@@ -16221,19 +19091,122 @@ rs6000_register_move_cost (enum machine_mode mode,
or from memory. */
int
-rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
+rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
int in ATTRIBUTE_UNUSED)
{
if (reg_classes_intersect_p (class, GENERAL_REGS))
- return 4 * HARD_REGNO_NREGS (0, mode);
+ return 4 * hard_regno_nregs[0][mode];
else if (reg_classes_intersect_p (class, FLOAT_REGS))
- return 4 * HARD_REGNO_NREGS (32, mode);
+ return 4 * hard_regno_nregs[32][mode];
else if (reg_classes_intersect_p (class, ALTIVEC_REGS))
- return 4 * HARD_REGNO_NREGS (FIRST_ALTIVEC_REGNO, mode);
+ return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
else
return 4 + rs6000_register_move_cost (mode, class, GENERAL_REGS);
}
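Worked arithmetic for the two cost routines, assuming 32-bit GPRs (the
nregs values below are illustrative):

/* With 32-bit registers, hard_regno_nregs[0][DImode] == 2, so
     GPR <-> memory : 4 * 2 == 8   (rs6000_memory_move_cost)
     GPR <-> GPR    : 2 * 2 == 4   (rs6000_register_move_cost)
   and a class with no direct path pays the extra "4 +" detour through
   GENERAL_REGS.  */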
+/* Newton-Raphson approximation of single-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivsf (rtx res, rtx n, rtx d)
+{
+ rtx x0, e0, e1, y1, u0, v0, one;
+
+ x0 = gen_reg_rtx (SFmode);
+ e0 = gen_reg_rtx (SFmode);
+ e1 = gen_reg_rtx (SFmode);
+ y1 = gen_reg_rtx (SFmode);
+ u0 = gen_reg_rtx (SFmode);
+ v0 = gen_reg_rtx (SFmode);
+ one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (SFmode, one,
+ gen_rtx_MULT (SFmode, d, x0))));
+ /* e1 = e0 + e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e0, e0), e0)));
+ /* y1 = x0 + e1 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e1, x0), x0)));
+ /* u0 = n * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (SFmode, n, y1)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (SFmode, n,
+ gen_rtx_MULT (SFmode, d, u0))));
+ /* res = u0 + v0 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, res,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, v0, y1), u0)));
+}
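A plain-C model of the sequence just emitted, for illustration only:
the compiler emits fres plus fused multiply-add RTL, and the 1.0f / d
below merely stands in for the low-precision hardware estimate.

#include <stdio.h>

static float swdiv_model (float n, float d)
{
  float x0 = 1.0f / d;       /* stand-in for the fres estimate */
  float e0 = 1.0f - d * x0;  /* residual: x0 == (1 - e0) / d */
  float e1 = e0 + e0 * e0;   /* second-order correction term */
  float y1 = x0 + e1 * x0;   /* y1 ~= x0 * (1 + e0 + e0^2) */
  float u0 = n * y1;         /* quotient estimate */
  float v0 = n - d * u0;     /* remaining error of u0 */
  return u0 + v0 * y1;       /* corrected quotient */
}

int main (void)
{
  printf ("%.8g\n", swdiv_model (355.0f, 113.0f));  /* ~3.1415929 */
  return 0;
}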
+
+/* Newton-Raphson approximation of double-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivdf (rtx res, rtx n, rtx d)
+{
+ rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
+
+ x0 = gen_reg_rtx (DFmode);
+ e0 = gen_reg_rtx (DFmode);
+ e1 = gen_reg_rtx (DFmode);
+ e2 = gen_reg_rtx (DFmode);
+ y1 = gen_reg_rtx (DFmode);
+ y2 = gen_reg_rtx (DFmode);
+ y3 = gen_reg_rtx (DFmode);
+ u0 = gen_reg_rtx (DFmode);
+ v0 = gen_reg_rtx (DFmode);
+ one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (DFmode, one,
+ gen_rtx_MULT (DFmode, d, x0))));
+ /* y1 = x0 + e0 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e0, x0), x0)));
+ /* e1 = e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_MULT (DFmode, e0, e0)));
+ /* y2 = y1 + e1 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, y2,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e1, y1), y1)));
+ /* e2 = e1 * e1 */
+ emit_insn (gen_rtx_SET (VOIDmode, e2,
+ gen_rtx_MULT (DFmode, e1, e1)));
+ /* y3 = y2 + e2 * y2 */
+ emit_insn (gen_rtx_SET (VOIDmode, y3,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e2, y2), y2)));
+ /* u0 = n * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (DFmode, n, y3)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (DFmode, n,
+ gen_rtx_MULT (DFmode, d, u0))));
+ /* res = u0 + v0 * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, res,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, v0, y3), u0)));
+}
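Why this variant needs three refinement steps against one above: a
back-of-the-envelope error budget, assuming the hardware estimate is
good to roughly 2**-8.

/* Write the estimate as x0 == (1 - e0) / d with |e0| ~ 2**-8.  Each
   refinement appends further powers of e0 to the series
   x0 * (1 + e0 + e0^2 + ...):
     y1 : error O(e0^2) ~ 2**-16
     y2 : error O(e0^4) ~ 2**-32
     y3 : error O(e0^8) ~ 2**-64, past DFmode's 53-bit significand,
   while the SFmode sequence stops at O(e0^3) ~ 2**-24, matching
   SFmode's 24-bit significand.  */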
+
/* Return an RTX representing where to find the function value of a
function returning MODE. */
static rtx
@@ -16281,6 +19254,26 @@ rs6000_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
enum machine_mode mode;
unsigned int regno;
+ /* Special handling for structs in darwin64. */
+ if (rs6000_darwin64_abi
+ && TYPE_MODE (valtype) == BLKmode
+ && TREE_CODE (valtype) == RECORD_TYPE
+ && int_size_in_bytes (valtype) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed as
+ an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
+ if (valret)
+ return valret;
+ /* Otherwise fall through to standard ABI rules. */
+ }
+
if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
{
      /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI.  */
@@ -16294,22 +19287,46 @@ rs6000_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
GP_ARG_RETURN + 1),
GEN_INT (4))));
}
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
+ {
+ return gen_rtx_PARALLEL (DCmode,
+ gen_rtvec (4,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 2),
+ GEN_INT (8)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 3),
+ GEN_INT (12))));
+ }
- if ((INTEGRAL_TYPE_P (valtype)
- && TYPE_PRECISION (valtype) < BITS_PER_WORD)
+ mode = TYPE_MODE (valtype);
+ if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
|| POINTER_TYPE_P (valtype))
mode = TARGET_32BIT ? SImode : DImode;
- else
- mode = TYPE_MODE (valtype);
- if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
regno = FP_ARG_RETURN;
else if (TREE_CODE (valtype) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
else if (TREE_CODE (valtype) == VECTOR_TYPE
- && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (mode))
regno = ALTIVEC_ARG_RETURN;
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
else
regno = GP_ARG_RETURN;
@@ -16323,7 +19340,23 @@ rs6000_libcall_value (enum machine_mode mode)
{
unsigned int regno;
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
+ {
+ /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
+ return gen_rtx_PARALLEL (DImode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4))));
+ }
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_MODE_P (mode)
&& TARGET_HARD_FLOAT && TARGET_FPRS)
regno = FP_ARG_RETURN;
else if (ALTIVEC_VECTOR_MODE (mode)
@@ -16331,6 +19364,9 @@ rs6000_libcall_value (enum machine_mode mode)
regno = ALTIVEC_ARG_RETURN;
else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
else
regno = GP_ARG_RETURN;
@@ -16345,29 +19381,39 @@ rs6000_initial_elimination_offset (int from, int to)
rs6000_stack_t *info = rs6000_stack_info ();
HOST_WIDE_INT offset;
- if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
offset = info->push_p ? 0 : -info->total_size;
- else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+ else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ {
+ offset = info->push_p ? 0 : -info->total_size;
+ if (FRAME_GROWS_DOWNWARD)
+ offset += info->fixed_size + info->vars_size + info->parm_size;
+ }
+ else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ offset = FRAME_GROWS_DOWNWARD
+ ? info->fixed_size + info->vars_size + info->parm_size
+ : 0;
+ else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
offset = info->total_size;
else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
offset = info->push_p ? info->total_size : 0;
else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
offset = 0;
else
- abort ();
+ gcc_unreachable ();
return offset;
}
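A worked instance of the elimination table, using made-up frame sizes:

/* Suppose info->push_p, info->total_size == 64, the frame grows
   downward, and fixed_size + vars_size + parm_size == 48.  Then:
     ARG_POINTER        -> STACK_POINTER      : +64
     ARG_POINTER        -> HARD_FRAME_POINTER : +64  (total_size)
     FRAME_POINTER      -> HARD_FRAME_POINTER : +48
     FRAME_POINTER      -> STACK_POINTER      : +48  (0 + 48)
     HARD_FRAME_POINTER -> STACK_POINTER      :   0  (push_p)  */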
-/* Return true if TYPE is of type __ev64_opaque__. */
+/* Return true if TYPE is a SPE or AltiVec opaque type. */
static bool
-is_ev64_opaque_type (tree type)
+rs6000_is_opaque_type (tree type)
{
- return (TARGET_SPE
- && (type == opaque_V2SI_type_node
+ return (type == opaque_V2SI_type_node
|| type == opaque_V2SF_type_node
- || type == opaque_p_V2SI_type_node));
+ || type == opaque_p_V2SI_type_node
+ || type == opaque_V4SI_type_node);
}
static rtx
@@ -16375,7 +19421,11 @@ rs6000_dwarf_register_span (rtx reg)
{
unsigned regno;
- if (!TARGET_SPE || !SPE_VECTOR_MODE (GET_MODE (reg)))
+ if (TARGET_SPE
+ && (SPE_VECTOR_MODE (GET_MODE (reg))
+ || (TARGET_E500_DOUBLE && GET_MODE (reg) == DFmode)))
+ ;
+ else
return NULL_RTX;
regno = REGNO (reg);
@@ -16423,10 +19473,67 @@ rs6000_dbx_register_number (unsigned int regno)
return 612;
/* SPE high reg number. We get these values of regno from
rs6000_dwarf_register_span. */
- if (regno >= 1200 && regno < 1232)
- return regno;
+ gcc_assert (regno >= 1200 && regno < 1232);
+ return regno;
+}
+
+/* target hook eh_return_filter_mode */
+static enum machine_mode
+rs6000_eh_return_filter_mode (void)
+{
+ return TARGET_32BIT ? SImode : word_mode;
+}
- abort ();
+/* Target hook for scalar_mode_supported_p. */
+static bool
+rs6000_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Target hook for vector_mode_supported_p. */
+static bool
+rs6000_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return true;
+
+ else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
+ return true;
+
+ else
+ return false;
+}
+
+/* Target hook for invalid_arg_for_unprototyped_fn. */
+static const char *
+invalid_arg_for_unprototyped_fn (tree typelist, tree funcdecl, tree val)
+{
+ return (!rs6000_darwin64_abi
+ && typelist == 0
+ && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
+ && (funcdecl == NULL_TREE
+ || (TREE_CODE (funcdecl) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ ? N_("AltiVec argument passed to unprototyped function")
+ : NULL;
+}
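A hypothetical source fragment, not from the patch, that would trip
the diagnostic:

/* Example trigger:

     extern void f ();               K&R-style, no prototype
     vector int v = { 0, 0, 0, 0 };
     f (v);                          warns: AltiVec argument passed
                                     to unprototyped function       */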
+
+/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
+ setup by using __stack_chk_fail_local hidden function instead of
+ calling __stack_chk_fail directly. Otherwise it is better to call
+ __stack_chk_fail directly. */
+
+static tree
+rs6000_stack_protect_fail (void)
+{
+ return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
+ ? default_hidden_stack_protect_fail ()
+ : default_external_stack_protect_fail ();
}
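A sketch of the practical effect; the exact call sequences are an
assumption, not taken from the patch:

/* On ABI_V4 with -msecure-plt and -fpic/-fPIC, the canary-mismatch
   path becomes
       bl __stack_chk_fail_local
   which binds locally (hidden visibility), so no PIC register setup
   is needed on the failure path.  Every other configuration emits a
   normal call to __stack_chk_fail through the usual PLT machinery.  */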
#include "gt-rs6000.h"
diff --git a/contrib/gcc/config/rs6000/rs6000.h b/contrib/gcc/config/rs6000/rs6000.h
index 01a97da..3294bd2 100644
--- a/contrib/gcc/config/rs6000/rs6000.h
+++ b/contrib/gcc/config/rs6000/rs6000.h
@@ -1,6 +1,7 @@
/* Definitions of target machine for GNU compiler, for IBM RS/6000.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -17,8 +18,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Note that some other tm.h files include this one and then override
many of the definitions. */
@@ -40,11 +41,22 @@
#define TARGET_AIX 0
#endif
+/* Control whether function entry points use a "dot" symbol when
+ ABI_AIX. */
+#define DOT_SYMBOLS 1
+
/* Default string to use for cpu if not specified. */
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT ((char *)0)
#endif
+/* If configured for PPC405, support PPC405CR Erratum77. */
+#ifdef CONFIG_PPC405CR
+#define PPC405_ERRATUM77 (rs6000_cpu == PROCESSOR_PPC405)
+#else
+#define PPC405_ERRATUM77 0
+#endif
+
/* Common ASM definitions used by ASM_SPEC among the various targets
for handling -mcpu=xxx switches. */
#define ASM_CPU_SPEC \
@@ -61,6 +73,8 @@
%{mcpu=power3: -mppc64} \
%{mcpu=power4: -mpower4} \
%{mcpu=power5: -mpower4} \
+%{mcpu=power5+: -mpower4} \
+%{mcpu=power6: -mpower4 -maltivec} \
%{mcpu=powerpc: -mppc} \
%{mcpu=rios: -mpwr} \
%{mcpu=rios1: -mpwr} \
@@ -124,252 +138,59 @@
/* Architecture type. */
-extern int target_flags;
-
-/* Use POWER architecture instructions and MQ register. */
-#define MASK_POWER 0x00000001
-
-/* Use POWER2 extensions to POWER architecture. */
-#define MASK_POWER2 0x00000002
-
-/* Use PowerPC architecture instructions. */
-#define MASK_POWERPC 0x00000004
-
-/* Use PowerPC General Purpose group optional instructions, e.g. fsqrt. */
-#define MASK_PPC_GPOPT 0x00000008
-
-/* Use PowerPC Graphics group optional instructions, e.g. fsel. */
-#define MASK_PPC_GFXOPT 0x00000010
-
-/* Use PowerPC-64 architecture instructions. */
-#define MASK_POWERPC64 0x00000020
-
-/* Use revised mnemonic names defined for PowerPC architecture. */
-#define MASK_NEW_MNEMONICS 0x00000040
-
-/* Disable placing fp constants in the TOC; can be turned on when the
- TOC overflows. */
-#define MASK_NO_FP_IN_TOC 0x00000080
-
-/* Disable placing symbol+offset constants in the TOC; can be turned on when
- the TOC overflows. */
-#define MASK_NO_SUM_IN_TOC 0x00000100
+/* Force TARGET_MFCRF to 0 if the target assembler does not support
+   the optional field operand for mfcr.  */
-/* Output only one TOC entry per module. Normally linking fails if
- there are more than 16K unique variables/constants in an executable. With
- this option, linking fails only if there are more than 16K modules, or
- if there are more than 16K unique variables/constant in a single module.
-
- This is at the cost of having 2 extra loads and one extra store per
- function, and one less allocable register. */
-#define MASK_MINIMAL_TOC 0x00000200
-
-/* Nonzero for the 64 bit ABIs: longs and pointers are 64 bits. The
- chip is running in "64-bit mode", in which CR0 is set in dot
- operations based on all 64 bits of the register, bdnz works on 64-bit
- ctr, lr is 64 bits, and so on. Requires MASK_POWERPC64. */
-#define MASK_64BIT 0x00000400
-
-/* Disable use of FPRs. */
-#define MASK_SOFT_FLOAT 0x00000800
-
-/* Enable load/store multiple, even on PowerPC */
-#define MASK_MULTIPLE 0x00001000
-
-/* Use string instructions for block moves */
-#define MASK_STRING 0x00002000
-
-/* Disable update form of load/store */
-#define MASK_NO_UPDATE 0x00004000
-
-/* Disable fused multiply/add operations */
-#define MASK_NO_FUSED_MADD 0x00008000
-
-/* Nonzero if we need to schedule the prolog and epilog. */
-#define MASK_SCHED_PROLOG 0x00010000
-
-/* Use AltiVec instructions. */
-#define MASK_ALTIVEC 0x00020000
-
-/* Return small structures in memory (as the AIX ABI requires). */
-#define MASK_AIX_STRUCT_RET 0x00040000
-
-/* Use single field mfcr instruction. */
-#define MASK_MFCRF 0x00080000
+#ifndef HAVE_AS_MFCRF
+#undef TARGET_MFCRF
+#define TARGET_MFCRF 0
+#endif
-/* The only remaining free bits are 0x00600000. linux64.h uses
- 0x00100000, and sysv4.h uses 0x00800000 -> 0x40000000.
- 0x80000000 is not available because target_flags is signed. */
+/* Force TARGET_POPCNTB to 0 if the target assembler does not support
+   the popcount byte instruction.  */
-#define TARGET_POWER (target_flags & MASK_POWER)
-#define TARGET_POWER2 (target_flags & MASK_POWER2)
-#define TARGET_POWERPC (target_flags & MASK_POWERPC)
-#define TARGET_PPC_GPOPT (target_flags & MASK_PPC_GPOPT)
-#define TARGET_PPC_GFXOPT (target_flags & MASK_PPC_GFXOPT)
-#define TARGET_NEW_MNEMONICS (target_flags & MASK_NEW_MNEMONICS)
-#define TARGET_NO_FP_IN_TOC (target_flags & MASK_NO_FP_IN_TOC)
-#define TARGET_NO_SUM_IN_TOC (target_flags & MASK_NO_SUM_IN_TOC)
-#define TARGET_MINIMAL_TOC (target_flags & MASK_MINIMAL_TOC)
-#define TARGET_64BIT (target_flags & MASK_64BIT)
-#define TARGET_SOFT_FLOAT (target_flags & MASK_SOFT_FLOAT)
-#define TARGET_MULTIPLE (target_flags & MASK_MULTIPLE)
-#define TARGET_STRING (target_flags & MASK_STRING)
-#define TARGET_NO_UPDATE (target_flags & MASK_NO_UPDATE)
-#define TARGET_NO_FUSED_MADD (target_flags & MASK_NO_FUSED_MADD)
-#define TARGET_SCHED_PROLOG (target_flags & MASK_SCHED_PROLOG)
-#define TARGET_ALTIVEC (target_flags & MASK_ALTIVEC)
-#define TARGET_AIX_STRUCT_RET (target_flags & MASK_AIX_STRUCT_RET)
+#ifndef HAVE_AS_POPCNTB
+#undef TARGET_POPCNTB
+#define TARGET_POPCNTB 0
+#endif
-/* Define TARGET_MFCRF if the target assembler supports the optional
- field operand for mfcr and the target processor supports the
- instruction. */
+/* Force TARGET_FPRND to 0 if the target assembler does not support
+   the fp rounding instructions.  */
-#ifdef HAVE_AS_MFCRF
-#define TARGET_MFCRF (target_flags & MASK_MFCRF)
-#else
-#define TARGET_MFCRF 0
+#ifndef HAVE_AS_FPRND
+#undef TARGET_FPRND
+#define TARGET_FPRND 0
#endif
+#ifndef TARGET_SECURE_PLT
+#define TARGET_SECURE_PLT 0
+#endif
#define TARGET_32BIT (! TARGET_64BIT)
-#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
-#define TARGET_UPDATE (! TARGET_NO_UPDATE)
-#define TARGET_FUSED_MADD (! TARGET_NO_FUSED_MADD)
-
-/* Emit a dtp-relative reference to a TLS variable. */
-
-#ifdef HAVE_AS_TLS
-#define ASM_OUTPUT_DWARF_DTPREL(FILE, SIZE, X) \
- rs6000_output_dwarf_dtprel (FILE, SIZE, X)
-#endif
#ifndef HAVE_AS_TLS
#define HAVE_AS_TLS 0
#endif
+/* Return 1 for a symbol ref for a thread-local storage symbol. */
+#define RS6000_SYMBOL_REF_TLS_P(RTX) \
+ (GET_CODE (RTX) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (RTX) != 0)
+
#ifdef IN_LIBGCC2
/* For libgcc2 we make sure this is a compile time constant */
-#if defined (__64BIT__) || defined (__powerpc64__)
+#if defined (__64BIT__) || defined (__powerpc64__) || defined (__ppc64__)
+#undef TARGET_POWERPC64
#define TARGET_POWERPC64 1
#else
+#undef TARGET_POWERPC64
#define TARGET_POWERPC64 0
#endif
#else
-#define TARGET_POWERPC64 (target_flags & MASK_POWERPC64)
+ /* The option machinery will define this. */
#endif
-#define TARGET_XL_COMPAT 0
-
-/* Run-time compilation parameters selecting different hardware subsets.
-
- Macro to define tables used to set the flags.
- This is a list in braces of pairs in braces,
- each pair being { "NAME", VALUE }
- where VALUE is the bits to set or minus the bits to clear.
- An empty string NAME is used to identify the default VALUE. */
-
-#define TARGET_SWITCHES \
- {{"power", MASK_POWER | MASK_MULTIPLE | MASK_STRING, \
- N_("Use POWER instruction set")}, \
- {"power2", (MASK_POWER | MASK_MULTIPLE | MASK_STRING \
- | MASK_POWER2), \
- N_("Use POWER2 instruction set")}, \
- {"no-power2", - MASK_POWER2, \
- N_("Do not use POWER2 instruction set")}, \
- {"no-power", - (MASK_POWER | MASK_POWER2 | MASK_MULTIPLE \
- | MASK_STRING), \
- N_("Do not use POWER instruction set")}, \
- {"powerpc", MASK_POWERPC, \
- N_("Use PowerPC instruction set")}, \
- {"no-powerpc", - (MASK_POWERPC | MASK_PPC_GPOPT \
- | MASK_PPC_GFXOPT | MASK_POWERPC64), \
- N_("Do not use PowerPC instruction set")}, \
- {"powerpc-gpopt", MASK_POWERPC | MASK_PPC_GPOPT, \
- N_("Use PowerPC General Purpose group optional instructions")},\
- {"no-powerpc-gpopt", - MASK_PPC_GPOPT, \
- N_("Do not use PowerPC General Purpose group optional instructions")},\
- {"powerpc-gfxopt", MASK_POWERPC | MASK_PPC_GFXOPT, \
- N_("Use PowerPC Graphics group optional instructions")},\
- {"no-powerpc-gfxopt", - MASK_PPC_GFXOPT, \
- N_("Do not use PowerPC Graphics group optional instructions")},\
- {"powerpc64", MASK_POWERPC64, \
- N_("Use PowerPC-64 instruction set")}, \
- {"no-powerpc64", - MASK_POWERPC64, \
- N_("Do not use PowerPC-64 instruction set")}, \
- {"altivec", MASK_ALTIVEC , \
- N_("Use AltiVec instructions")}, \
- {"no-altivec", - MASK_ALTIVEC , \
- N_("Do not use AltiVec instructions")}, \
- {"new-mnemonics", MASK_NEW_MNEMONICS, \
- N_("Use new mnemonics for PowerPC architecture")},\
- {"old-mnemonics", -MASK_NEW_MNEMONICS, \
- N_("Use old mnemonics for PowerPC architecture")},\
- {"full-toc", - (MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC \
- | MASK_MINIMAL_TOC), \
- N_("Put everything in the regular TOC")}, \
- {"fp-in-toc", - MASK_NO_FP_IN_TOC, \
- N_("Place floating point constants in TOC")}, \
- {"no-fp-in-toc", MASK_NO_FP_IN_TOC, \
- N_("Do not place floating point constants in TOC")},\
- {"sum-in-toc", - MASK_NO_SUM_IN_TOC, \
- N_("Place symbol+offset constants in TOC")}, \
- {"no-sum-in-toc", MASK_NO_SUM_IN_TOC, \
- N_("Do not place symbol+offset constants in TOC")},\
- {"minimal-toc", MASK_MINIMAL_TOC, \
- "Use only one TOC entry per procedure"}, \
- {"minimal-toc", - (MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC), \
- ""}, \
- {"no-minimal-toc", - MASK_MINIMAL_TOC, \
- N_("Place variable addresses in the regular TOC")},\
- {"hard-float", - MASK_SOFT_FLOAT, \
- N_("Use hardware floating point")}, \
- {"soft-float", MASK_SOFT_FLOAT, \
- N_("Do not use hardware floating point")}, \
- {"multiple", MASK_MULTIPLE, \
- N_("Generate load/store multiple instructions")}, \
- {"no-multiple", - MASK_MULTIPLE, \
- N_("Do not generate load/store multiple instructions")},\
- {"string", MASK_STRING, \
- N_("Generate string instructions for block moves")},\
- {"no-string", - MASK_STRING, \
- N_("Do not generate string instructions for block moves")},\
- {"update", - MASK_NO_UPDATE, \
- N_("Generate load/store with update instructions")},\
- {"no-update", MASK_NO_UPDATE, \
- N_("Do not generate load/store with update instructions")},\
- {"fused-madd", - MASK_NO_FUSED_MADD, \
- N_("Generate fused multiply/add instructions")},\
- {"no-fused-madd", MASK_NO_FUSED_MADD, \
- N_("Do not generate fused multiply/add instructions")},\
- {"sched-prolog", MASK_SCHED_PROLOG, \
- ""}, \
- {"no-sched-prolog", -MASK_SCHED_PROLOG, \
- N_("Do not schedule the start and end of the procedure")},\
- {"sched-epilog", MASK_SCHED_PROLOG, \
- ""}, \
- {"no-sched-epilog", -MASK_SCHED_PROLOG, \
- ""}, \
- {"aix-struct-return", MASK_AIX_STRUCT_RET, \
- N_("Return all structures in memory (AIX default)")},\
- {"svr4-struct-return", - MASK_AIX_STRUCT_RET, \
- N_("Return small structures in registers (SVR4 default)")},\
- {"no-aix-struct-return", - MASK_AIX_STRUCT_RET, \
- ""}, \
- {"no-svr4-struct-return", MASK_AIX_STRUCT_RET, \
- ""}, \
- {"mfcrf", MASK_MFCRF, \
- N_("Generate single field mfcr instruction")}, \
- {"no-mfcrf", - MASK_MFCRF, \
- N_("Do not generate single field mfcr instruction")},\
- SUBTARGET_SWITCHES \
- {"", TARGET_DEFAULT | MASK_SCHED_PROLOG, \
- ""}}
-
#define TARGET_DEFAULT (MASK_POWER | MASK_MULTIPLE | MASK_STRING)
-/* This is meant to be redefined in the host dependent files */
-#define SUBTARGET_SWITCHES
-
/* Processor type. Order must match cpu attribute in MD file. */
enum processor_type
{
@@ -438,46 +259,6 @@ enum group_termination
previous_group
};
-/* This is meant to be overridden in target specific files. */
-#define SUBTARGET_OPTIONS
-
-#define TARGET_OPTIONS \
-{ \
- {"cpu=", &rs6000_select[1].string, \
- N_("Use features of and schedule code for given CPU"), 0}, \
- {"tune=", &rs6000_select[2].string, \
- N_("Schedule code for given CPU"), 0}, \
- {"debug=", &rs6000_debug_name, N_("Enable debug output"), 0}, \
- {"traceback=", &rs6000_traceback_name, \
- N_("Select full, part, or no traceback table"), 0}, \
- {"abi=", &rs6000_abi_string, N_("Specify ABI to use"), 0}, \
- {"long-double-", &rs6000_long_double_size_string, \
- N_("Specify size of long double (64 or 128 bits)"), 0}, \
- {"isel=", &rs6000_isel_string, \
- N_("Specify yes/no if isel instructions should be generated"), 0}, \
- {"spe=", &rs6000_spe_string, \
- N_("Specify yes/no if SPE SIMD instructions should be generated"), 0},\
- {"float-gprs=", &rs6000_float_gprs_string, \
- N_("Specify yes/no if using floating point in the GPRs"), 0}, \
- {"vrsave=", &rs6000_altivec_vrsave_string, \
- N_("Specify yes/no if VRSAVE instructions should be generated for AltiVec"), 0}, \
- {"longcall", &rs6000_longcall_switch, \
- N_("Avoid all range limits on call instructions"), 0}, \
- {"no-longcall", &rs6000_longcall_switch, "", 0}, \
- {"warn-altivec-long", &rs6000_warn_altivec_long_switch, \
- N_("Warn about deprecated 'vector long ...' AltiVec type usage"), 0}, \
- {"no-warn-altivec-long", &rs6000_warn_altivec_long_switch, "", 0}, \
- {"sched-costly-dep=", &rs6000_sched_costly_dep_str, \
- N_("Determine which dependences between insns are considered costly"), 0}, \
- {"insert-sched-nops=", &rs6000_sched_insert_nops_str, \
- N_("Specify which post scheduling nop insertion scheme to apply"), 0}, \
- {"align-", &rs6000_alignment_string, \
- N_("Specify alignment of structure fields default/natural"), 0}, \
- {"prioritize-restricted-insns=", &rs6000_sched_restricted_insns_priority_str, \
- N_("Specify scheduling priority for dispatch slot restricted insns"), 0}, \
- SUBTARGET_OPTIONS \
-}
-
/* Support for a compile-time default CPU, et cetera. The rules are:
--with-cpu is ignored if -mcpu is specified.
--with-tune is ignored if -mtune is specified.
@@ -501,7 +282,6 @@ extern struct rs6000_cpu_select rs6000_select[];
/* Debug support */
extern const char *rs6000_debug_name; /* Name for -mdebug-xxxx option */
-extern const char *rs6000_abi_string; /* for -mabi={sysv,darwin,eabi,aix,altivec} */
extern int rs6000_debug_stack; /* debug stack applications */
extern int rs6000_debug_arg; /* debug argument handling */
@@ -512,32 +292,15 @@ extern const char *rs6000_traceback_name; /* Type of traceback table. */
/* These are separate from target_flags because we've run out of bits
there. */
-extern const char *rs6000_long_double_size_string;
extern int rs6000_long_double_type_size;
+extern int rs6000_ieeequad;
extern int rs6000_altivec_abi;
extern int rs6000_spe_abi;
-extern int rs6000_isel;
-extern int rs6000_spe;
extern int rs6000_float_gprs;
-extern const char *rs6000_float_gprs_string;
-extern const char *rs6000_isel_string;
-extern const char *rs6000_spe_string;
-extern const char *rs6000_altivec_vrsave_string;
-extern int rs6000_altivec_vrsave;
-extern const char *rs6000_longcall_switch;
-extern int rs6000_default_long_calls;
-extern const char* rs6000_alignment_string;
extern int rs6000_alignment_flags;
-extern const char *rs6000_sched_restricted_insns_priority_str;
-extern int rs6000_sched_restricted_insns_priority;
-extern const char *rs6000_sched_costly_dep_str;
-extern enum rs6000_dependence_cost rs6000_sched_costly_dep;
extern const char *rs6000_sched_insert_nops_str;
extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
-extern int rs6000_warn_altivec_long;
-extern const char *rs6000_warn_altivec_long_switch;
-
/* Alignment options for fields in structures for sub-targets following
AIX-like ABI.
ALIGN_POWER word-aligns FP doubles (default AIX ABI).
@@ -546,7 +309,7 @@ extern const char *rs6000_warn_altivec_long_switch;
Override the macro definitions when compiling libobjc to avoid undefined
reference to rs6000_alignment_flags due to library's use of GCC alignment
macros which use the macros below. */
-
+
#ifndef IN_TARGET_LIBS
#define MASK_ALIGN_POWER 0x00000000
#define MASK_ALIGN_NATURAL 0x00000001
@@ -556,14 +319,19 @@ extern const char *rs6000_warn_altivec_long_switch;
#endif
#define TARGET_LONG_DOUBLE_128 (rs6000_long_double_type_size == 128)
+#define TARGET_IEEEQUAD rs6000_ieeequad
#define TARGET_ALTIVEC_ABI rs6000_altivec_abi
-#define TARGET_ALTIVEC_VRSAVE rs6000_altivec_vrsave
#define TARGET_SPE_ABI 0
#define TARGET_SPE 0
#define TARGET_E500 0
#define TARGET_ISEL 0
#define TARGET_FPRS 1
+#define TARGET_E500_SINGLE 0
+#define TARGET_E500_DOUBLE 0
+
+/* E500 processors only support plain "sync", not lwsync. */
+#define TARGET_NO_LWSYNC TARGET_E500
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
@@ -587,6 +355,7 @@ extern const char *rs6000_warn_altivec_long_switch;
/* Target pragma. */
#define REGISTER_TARGET_PRAGMAS() do { \
c_register_pragma (0, "longcall", rs6000_pragma_longcall); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
} while (0)
/* Target #defines. */
@@ -682,7 +451,6 @@ extern const char *rs6000_warn_altivec_long_switch;
target machine. If you don't define this, the default is one
word. */
#define LONG_TYPE_SIZE (TARGET_32BIT ? 32 : 64)
-#define MAX_LONG_TYPE_SIZE 64
/* A C expression for the size in bits of the type `long long' on the
target machine. If you don't define this, the default is two
@@ -704,9 +472,6 @@ extern const char *rs6000_warn_altivec_long_switch;
words. */
#define LONG_DOUBLE_TYPE_SIZE rs6000_long_double_type_size
-/* Constant which presents upper bound of the above value. */
-#define MAX_LONG_DOUBLE_TYPE_SIZE 128
-
/* Define this to set long double type size to use in libgcc2.c, which can
not depend on target_flags. */
#ifdef __LONG_DOUBLE_128__
@@ -740,7 +505,9 @@ extern const char *rs6000_warn_altivec_long_switch;
that the object would ordinarily have. */
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : \
- (TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE) ? 64 : ALIGN)
+ (TARGET_E500_DOUBLE && TYPE_MODE (TYPE) == DFmode) ? 64 : \
+ (TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \
+ && SPE_VECTOR_MODE (TYPE_MODE (TYPE))) ? 64 : ALIGN)
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32
@@ -756,9 +523,13 @@ extern const char *rs6000_warn_altivec_long_switch;
store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
back-end. Because a single GPR can hold a V2SI, but not a DI, the
best thing to do is set structs to BLKmode and avoid Severe Tire
- Damage. */
+ Damage.
+
+ On e500 v2, DF and DI modes suffer from the same anomaly. DF can
+ fit into one GPR, whereas DI still needs two.  */
#define MEMBER_TYPE_FORCES_BLK(FIELD, MODE) \
- (TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE)
+ ((TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ || (TARGET_E500_DOUBLE && (MODE) == DFmode))
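An illustrative instance of the e500 v2 case just added (hypothetical
user code):

/* With float-in-GPRs on e500 v2,
     struct wrap { double d; };
   would otherwise get a 64-bit scalar mode: DFmode fits one 64-bit
   GPR, but the DImode view of the same bits needs two 32-bit GPRs, so
   subregs of the struct go wrong.  Forcing BLKmode sidesteps the
   mismatch.  */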
/* A bit-field declared as `int' forces `int' alignment for the struct. */
#define PCC_BITFIELD_TYPE_MATTERS 1
@@ -772,9 +543,11 @@ extern const char *rs6000_warn_altivec_long_switch;
: (ALIGN))
/* Make arrays of chars word-aligned for the same reasons.
- Align vectors to 128 bits. */
+ Align vectors to 128 bits. Align SPE vectors and E500 v2 doubles to
+ 64 bits. */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
(TREE_CODE (TYPE) == VECTOR_TYPE ? (TARGET_SPE_ABI ? 64 : 128) \
+ : (TARGET_E500_DOUBLE && TYPE_MODE (TYPE) == DFmode) ? 64 \
: TREE_CODE (TYPE) == ARRAY_TYPE \
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
&& (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
@@ -813,15 +586,18 @@ extern const char *rs6000_warn_altivec_long_switch;
We also create a pseudo register for float/int conversions, that will
really represent the memory location used. It is represented here as
a register, in order to work around problems in allocating stack storage
- in inline functions. */
+ in inline functions.
-#define FIRST_PSEUDO_REGISTER 113
+ Another pseudo (not included in DWARF_FRAME_REGISTERS) is the soft
+ frame pointer, which is eventually eliminated in favor of SP or FP. */
+
+#define FIRST_PSEUDO_REGISTER 114
/* This must be included for pre gcc 3.0 glibc compatibility. */
#define PRE_GCC3_DWARF_FRAME_REGISTERS 77
/* Add 32 dwarf columns for synthetic SPE registers. */
-#define DWARF_FRAME_REGISTERS (FIRST_PSEUDO_REGISTER + 32)
+#define DWARF_FRAME_REGISTERS ((FIRST_PSEUDO_REGISTER - 1) + 32)
/* The SPE has an additional 32 synthetic registers, with DWARF debug
info numbering for these registers starting at 1200. While eh_frame
@@ -835,13 +611,28 @@ extern const char *rs6000_warn_altivec_long_switch;
avoid invalidating older SPE eh_frame info.
We must map them here to avoid huge unwinder tables mostly consisting
- of unused space. */
+ of unused space. */
#define DWARF_REG_TO_UNWIND_COLUMN(r) \
- ((r) > 1200 ? ((r) - 1200 + FIRST_PSEUDO_REGISTER) : (r))
+ ((r) > 1200 ? ((r) - 1200 + FIRST_PSEUDO_REGISTER - 1) : (r))
+
+/* Use standard DWARF numbering for DWARF debugging information. */
+#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO)
/* Use gcc hard register numbering for eh_frame. */
#define DWARF_FRAME_REGNUM(REGNO) (REGNO)
+/* Map register numbers held in the call frame info that gcc has
+ collected using DWARF_FRAME_REGNUM to those that should be output in
+ .debug_frame and .eh_frame. We continue to use gcc hard reg numbers
+ for .eh_frame, but use the numbers mandated by the various ABIs for
+ .debug_frame. rs6000_emit_prologue has translated any combination of
+ CR2, CR3, CR4 saves to a save of CR2. The actual code emitted saves
+ the whole of CR, so we map CR2_REGNO to the DWARF reg for CR. */
+#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) \
+ ((FOR_EH) ? (REGNO) \
+ : (REGNO) == CR2_REGNO ? 64 \
+ : DBX_REGISTER_NUMBER (REGNO))
+
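Worked examples of the two mappings, using this port's layout (cr2 is
hard register 70, FIRST_PSEUDO_REGISTER is 114):

/* DWARF2_FRAME_REG_OUT (70, 1) == 70    .eh_frame keeps gcc numbering
   DWARF2_FRAME_REG_OUT (70, 0) == 64    .debug_frame reports all of CR
   DWARF_REG_TO_UNWIND_COLUMN (1205) == 1205 - 1200 + 114 - 1 == 118
   so SPE high registers pack directly after the hard registers.  */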
/* 1 for registers that have pervasive standard uses
and are not available for the register allocator.
@@ -862,7 +653,7 @@ extern const char *rs6000_warn_altivec_long_switch;
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
1, 1 \
- , 1, 1 \
+ , 1, 1, 1 \
}
/* 1 for registers not available across function calls.
@@ -882,7 +673,7 @@ extern const char *rs6000_warn_altivec_long_switch;
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
1, 1 \
- , 1, 1 \
+ , 1, 1, 1 \
}
/* Like `CALL_USED_REGISTERS' except this macro doesn't require that
@@ -890,7 +681,7 @@ extern const char *rs6000_warn_altivec_long_switch;
(`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS').
This macro is optional. If not specified, it defaults to the value
of `CALL_USED_REGISTERS'. */
-
+
#define CALL_REALLY_USED_REGISTERS \
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
@@ -901,7 +692,7 @@ extern const char *rs6000_warn_altivec_long_switch;
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0 \
- , 0, 0 \
+ , 0, 0, 0 \
}
#define MQ_REGNO 64
@@ -920,6 +711,10 @@ extern const char *rs6000_warn_altivec_long_switch;
#define SPE_ACC_REGNO 111
#define SPEFSCR_REGNO 112
+#define FIRST_SAVED_ALTIVEC_REGNO (FIRST_ALTIVEC_REGNO+20)
+#define FIRST_SAVED_FP_REGNO (14+32)
+#define FIRST_SAVED_GP_REGNO 13
+
/* List the order in which to allocate registers. Each register must be
listed once, even those in FIXED_REGISTERS.
@@ -927,31 +722,31 @@ extern const char *rs6000_warn_altivec_long_switch;
fp0 (not saved or used for anything)
fp13 - fp2 (not saved; incoming fp arg registers)
fp1 (not saved; return value)
- fp31 - fp14 (saved; order given to save least number)
+ fp31 - fp14 (saved; order given to save least number)
cr7, cr6 (not saved or special)
cr1 (not saved, but used for FP operations)
cr0 (not saved, but used for arithmetic operations)
cr4, cr3, cr2 (saved)
- r0 (not saved; cannot be base reg)
+ r0 (not saved; cannot be base reg)
r9 (not saved; best for TImode)
r11, r10, r8-r4 (not saved; highest used first to make less conflict)
- r3 (not saved; return value register)
+ r3 (not saved; return value register)
r31 - r13 (saved; order given to save least number)
r12 (not saved; if used for DImode or DFmode would use r13)
mq (not saved; best to use it if we can)
ctr (not saved; when we have the choice ctr is better)
lr (saved)
- cr5, r1, r2, ap, xer, vrsave, vscr (fixed)
+ cr5, r1, r2, ap, xer (fixed)
+ v0 - v1 (not saved or used for anything)
+ v13 - v3 (not saved; incoming vector arg registers)
+ v2 (not saved; incoming vector arg reg; return value)
+ v19 - v14 (not saved or used for anything)
+ v31 - v20 (saved; order given to save least number)
+ vrsave, vscr (fixed)
spe_acc, spefscr (fixed)
-
- AltiVec registers:
- v0 - v1 (not saved or used for anything)
- v13 - v3 (not saved; incoming vector arg registers)
- v2 (not saved; incoming vector arg reg; return value)
- v19 - v14 (not saved or used for anything)
- v31 - v20 (saved; order given to save least number)
+ sfp (fixed)
*/
-
+
#if FIXED_R2 == 1
#define MAYBE_R2_AVAILABLE
#define MAYBE_R2_FIXED 2,
@@ -960,28 +755,28 @@ extern const char *rs6000_warn_altivec_long_switch;
#define MAYBE_R2_FIXED
#endif
-#define REG_ALLOC_ORDER \
- {32, \
- 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, \
- 33, \
- 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, \
- 50, 49, 48, 47, 46, \
- 75, 74, 69, 68, 72, 71, 70, \
- 0, MAYBE_R2_AVAILABLE \
- 9, 11, 10, 8, 7, 6, 5, 4, \
- 3, \
- 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, \
- 18, 17, 16, 15, 14, 13, 12, \
- 64, 66, 65, \
- 73, 1, MAYBE_R2_FIXED 67, 76, \
- /* AltiVec registers. */ \
- 77, 78, \
- 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, \
- 79, \
- 96, 95, 94, 93, 92, 91, \
- 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, \
- 97, 109, 110 \
- , 111, 112 \
+#define REG_ALLOC_ORDER \
+ {32, \
+ 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, \
+ 33, \
+ 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, \
+ 50, 49, 48, 47, 46, \
+ 75, 74, 69, 68, 72, 71, 70, \
+ 0, MAYBE_R2_AVAILABLE \
+ 9, 11, 10, 8, 7, 6, 5, 4, \
+ 3, \
+ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, \
+ 18, 17, 16, 15, 14, 13, 12, \
+ 64, 66, 65, \
+ 73, 1, MAYBE_R2_FIXED 67, 76, \
+ /* AltiVec registers. */ \
+ 77, 78, \
+ 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, \
+ 79, \
+ 96, 95, 94, 93, 92, 91, \
+ 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, \
+ 109, 110, \
+ 111, 112, 113 \
}
/* True if register is floating-point. */
@@ -994,7 +789,8 @@ extern const char *rs6000_warn_altivec_long_switch;
#define CR_REGNO_NOT_CR0_P(N) ((N) >= 69 && (N) <= 75)
/* True if register is an integer register. */
-#define INT_REGNO_P(N) ((N) <= 31 || (N) == ARG_POINTER_REGNUM)
+#define INT_REGNO_P(N) \
+ ((N) <= 31 || (N) == ARG_POINTER_REGNUM || (N) == FRAME_POINTER_REGNUM)
/* SPE SIMD registers are just the GPRs. */
#define SPE_SIMD_REGNO_P(N) ((N) <= 31)
@@ -1006,29 +802,13 @@ extern const char *rs6000_warn_altivec_long_switch;
#define ALTIVEC_REGNO_P(N) ((N) >= FIRST_ALTIVEC_REGNO && (N) <= LAST_ALTIVEC_REGNO)
/* Return number of consecutive hard regs needed starting at reg REGNO
- to hold something of mode MODE.
- This is ordinarily the length in words of a value of mode MODE
- but can be less for certain modes in special long registers.
-
- For the SPE, GPRs are 64 bits but only 32 bits are visible in
- scalar instructions. The upper 32 bits are only available to the
- SIMD instructions.
-
- POWER and PowerPC GPRs hold 32 bits worth;
- PowerPC64 GPRs and FPRs point register holds 64 bits worth. */
-
-#define HARD_REGNO_NREGS(REGNO, MODE) \
- (FP_REGNO_P (REGNO) \
- ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
- : (SPE_SIMD_REGNO_P (REGNO) && TARGET_SPE && SPE_VECTOR_MODE (MODE)) \
- ? ((GET_MODE_SIZE (MODE) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD) \
- : ALTIVEC_REGNO_P (REGNO) \
- ? ((GET_MODE_SIZE (MODE) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD) \
- : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+ to hold something of mode MODE. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) rs6000_hard_regno_nregs ((REGNO), (MODE))
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
((TARGET_32BIT && TARGET_POWERPC64 \
- && (MODE == DImode || MODE == DFmode) \
+ && (GET_MODE_SIZE (MODE) > 4) \
&& INT_REGNO_P (REGNO)) ? 1 : 0)
#define ALTIVEC_VECTOR_MODE(MODE) \
@@ -1043,44 +823,24 @@ extern const char *rs6000_warn_altivec_long_switch;
|| (MODE) == V1DImode \
|| (MODE) == V2SImode)
-/* Define this macro to be nonzero if the port is prepared to handle
- insns involving vector mode MODE. At the very least, it must have
- move patterns for this mode. */
-
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- ((TARGET_SPE && SPE_VECTOR_MODE (MODE)) \
- || (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (MODE)))
-
-/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
- For POWER and PowerPC, the GPRs can hold any mode, but values bigger
- than one register cannot go past R31. The float
- registers only can hold floating modes and DImode, and CR register only
- can hold CC modes. We cannot put TImode anywhere except general
- register and it must be able to fit within the register set. */
-
-#define HARD_REGNO_MODE_OK(REGNO, MODE) \
- (INT_REGNO_P (REGNO) ? \
- INT_REGNO_P (REGNO + HARD_REGNO_NREGS (REGNO, MODE) - 1) \
- : FP_REGNO_P (REGNO) ? \
- ((GET_MODE_CLASS (MODE) == MODE_FLOAT \
- && FP_REGNO_P (REGNO + HARD_REGNO_NREGS (REGNO, MODE) - 1)) \
- || (GET_MODE_CLASS (MODE) == MODE_INT \
- && GET_MODE_SIZE (MODE) == UNITS_PER_FP_WORD)) \
- : ALTIVEC_REGNO_P (REGNO) ? ALTIVEC_VECTOR_MODE (MODE) \
- : SPE_SIMD_REGNO_P (REGNO) && TARGET_SPE && SPE_VECTOR_MODE (MODE) ? 1 \
- : CR_REGNO_P (REGNO) ? GET_MODE_CLASS (MODE) == MODE_CC \
- : XER_REGNO_P (REGNO) ? (MODE) == PSImode \
- : GET_MODE_SIZE (MODE) <= UNITS_PER_WORD)
+#define UNITS_PER_SIMD_WORD \
+ (TARGET_ALTIVEC ? UNITS_PER_ALTIVEC_WORD \
+ : (TARGET_SPE ? UNITS_PER_SPE_WORD : UNITS_PER_WORD))
+
+/* Value is TRUE if hard register REGNO can hold a value of
+ machine-mode MODE. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ rs6000_hard_regno_mode_ok_p[(int)(MODE)][REGNO]
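/* Sketch of the precomputed-table idiom this switches to: the answer is
   looked up in an array filled once at initialization instead of being
   re-derived on every query.  The dimensions and names below are
   placeholders, not GCC's real mode and register counts.  */
enum { N_MODES = 64, N_HARD_REGS = 114 };
static unsigned char regno_mode_ok[N_MODES][N_HARD_REGS]; /* filled at init */
#define REGNO_MODE_OK_P(REGNO, MODE) ((int) regno_mode_ok[(int) (MODE)][REGNO])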
/* Value is 1 if it is a good idea to tie two pseudo registers
when one has mode MODE1 and one has mode MODE2.
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
for any hard reg, then this must be 0 for correct output. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
- (GET_MODE_CLASS (MODE1) == MODE_FLOAT \
- ? GET_MODE_CLASS (MODE2) == MODE_FLOAT \
- : GET_MODE_CLASS (MODE2) == MODE_FLOAT \
- ? GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ (SCALAR_FLOAT_MODE_P (MODE1) \
+ ? SCALAR_FLOAT_MODE_P (MODE2) \
+ : SCALAR_FLOAT_MODE_P (MODE2) \
+ ? SCALAR_FLOAT_MODE_P (MODE1) \
: GET_MODE_CLASS (MODE1) == MODE_CC \
? GET_MODE_CLASS (MODE2) == MODE_CC \
: GET_MODE_CLASS (MODE2) == MODE_CC \
@@ -1120,9 +880,9 @@ extern const char *rs6000_warn_altivec_long_switch;
#define BRANCH_COST 3
/* Override BRANCH_COST heuristic which empirically produces worse
- performance for fold_range_test(). */
+ performance for removing short circuiting from the logical ops. */
-#define RANGE_TEST_NON_SHORT_CIRCUIT 0
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
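/* Roughly what the renamed hook controls: with
   LOGICAL_OP_NON_SHORT_CIRCUIT == 1 the middle end may turn
       if (a < b && c < d) ...
   into the branch-free
       if ((a < b) & (c < d)) ...
   Defining it to 0, as above, keeps the short-circuit (branching) form,
   which the comment says is empirically faster on this target.  */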
/* A fixed register used at prologue and epilogue generation to fix
addressing modes. The SPE needs heavy addressing fixes at the last
@@ -1137,62 +897,10 @@ extern const char *rs6000_warn_altivec_long_switch;
#define FIXED_SCRATCH (TARGET_SPE ? 14 : 11)
-/* Define this macro to change register usage conditional on target flags.
- Set MQ register fixed (already call_used) if not POWER architecture
- (RIOS1, RIOS2, RSC, and PPC601) so that it will not be allocated.
- 64-bit AIX reserves GPR13 for thread-private data.
- Conditionally disable FPRs. */
+/* Define this macro to change register usage conditional on target
+ flags. */
-#define CONDITIONAL_REGISTER_USAGE \
-{ \
- int i; \
- if (! TARGET_POWER) \
- fixed_regs[64] = 1; \
- if (TARGET_64BIT) \
- fixed_regs[13] = call_used_regs[13] \
- = call_really_used_regs[13] = 1; \
- if (TARGET_SOFT_FLOAT || !TARGET_FPRS) \
- for (i = 32; i < 64; i++) \
- fixed_regs[i] = call_used_regs[i] \
- = call_really_used_regs[i] = 1; \
- if (DEFAULT_ABI == ABI_V4 \
- && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM \
- && flag_pic == 2) \
- fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
- if (DEFAULT_ABI == ABI_V4 \
- && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM \
- && flag_pic == 1) \
- fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
- if (DEFAULT_ABI == ABI_DARWIN \
- && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
- global_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
- if (TARGET_TOC && TARGET_MINIMAL_TOC) \
- fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
- if (TARGET_ALTIVEC) \
- global_regs[VSCR_REGNO] = 1; \
- if (TARGET_SPE) \
- { \
- global_regs[SPEFSCR_REGNO] = 1; \
- fixed_regs[FIXED_SCRATCH] \
- = call_used_regs[FIXED_SCRATCH] \
- = call_really_used_regs[FIXED_SCRATCH] = 1; \
- } \
- if (! TARGET_ALTIVEC) \
- { \
- for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i) \
- fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; \
- call_really_used_regs[VRSAVE_REGNO] = 1; \
- } \
- if (TARGET_ALTIVEC_ABI) \
- for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i) \
- call_used_regs[i] = call_really_used_regs[i] = 1; \
-}
+#define CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage ()
/* Specify the registers used for certain standard purposes.
The values of these macros are register numbers. */
@@ -1204,7 +912,10 @@ extern const char *rs6000_warn_altivec_long_switch;
#define STACK_POINTER_REGNUM 1
/* Base register for access to local variables of the function. */
-#define FRAME_POINTER_REGNUM 31
+#define HARD_FRAME_POINTER_REGNUM 31
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 113
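/* Register 113 is a new "soft" frame pointer with no hardware register
   behind it; it is always eliminated to r31 or r1 (see ELIMINABLE_REGS
   below), and the register-name table later in this file calls it
   "sfp".  */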
/* Value should be nonzero if functions must have frame pointers.
Zero means the frame pointer need not be set up (and parms
@@ -1316,26 +1027,26 @@ enum reg_class
#define REG_CLASS_CONTENTS \
{ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
- { 0xfffffffe, 0x00000000, 0x00000008, 0x00000000 }, /* BASE_REGS */ \
- { 0xffffffff, 0x00000000, 0x00000008, 0x00000000 }, /* GENERAL_REGS */ \
+ { 0xfffffffe, 0x00000000, 0x00000008, 0x00020000 }, /* BASE_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000008, 0x00020000 }, /* GENERAL_REGS */ \
{ 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FLOAT_REGS */ \
{ 0x00000000, 0x00000000, 0xffffe000, 0x00001fff }, /* ALTIVEC_REGS */ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00002000 }, /* VRSAVE_REGS */ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00004000 }, /* VSCR_REGS */ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, /* SPE_ACC_REGS */ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00010000 }, /* SPEFSCR_REGS */ \
- { 0xffffffff, 0xffffffff, 0x00000008, 0x00000000 }, /* NON_SPECIAL_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000008, 0x00020000 }, /* NON_SPECIAL_REGS */ \
{ 0x00000000, 0x00000000, 0x00000001, 0x00000000 }, /* MQ_REGS */ \
{ 0x00000000, 0x00000000, 0x00000002, 0x00000000 }, /* LINK_REGS */ \
{ 0x00000000, 0x00000000, 0x00000004, 0x00000000 }, /* CTR_REGS */ \
{ 0x00000000, 0x00000000, 0x00000006, 0x00000000 }, /* LINK_OR_CTR_REGS */ \
{ 0x00000000, 0x00000000, 0x00000007, 0x00002000 }, /* SPECIAL_REGS */ \
- { 0xffffffff, 0x00000000, 0x0000000f, 0x00000000 }, /* SPEC_OR_GEN_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000000f, 0x00022000 }, /* SPEC_OR_GEN_REGS */ \
{ 0x00000000, 0x00000000, 0x00000010, 0x00000000 }, /* CR0_REGS */ \
{ 0x00000000, 0x00000000, 0x00000ff0, 0x00000000 }, /* CR_REGS */ \
- { 0xffffffff, 0x00000000, 0x0000efff, 0x00000000 }, /* NON_FLOAT_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000efff, 0x00020000 }, /* NON_FLOAT_REGS */ \
{ 0x00000000, 0x00000000, 0x00001000, 0x00000000 }, /* XER_REGS */ \
- { 0xffffffff, 0xffffffff, 0xffffffff, 0x00003fff } /* ALL_REGS */ \
+ { 0xffffffff, 0xffffffff, 0xffffffff, 0x0003ffff } /* ALL_REGS */ \
}
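/* Bit bookkeeping for the changed masks: each row is four 32-bit words
   covering hard registers 0-127, and the new soft frame pointer is
   register 113 (defined above), which lands in the fourth word at bit
   113 - 96 == 17, i.e. 1 << 17 == 0x00020000.  */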
/* The same information, inverted:
@@ -1356,115 +1067,27 @@ enum reg_class
: (REGNO) == ARG_POINTER_REGNUM ? BASE_REGS \
: (REGNO) == XER_REGNO ? XER_REGS \
: (REGNO) == VRSAVE_REGNO ? VRSAVE_REGS \
- : (REGNO) == VSCR_REGNO ? VRSAVE_REGS \
+ : (REGNO) == VSCR_REGNO ? VRSAVE_REGS \
: (REGNO) == SPE_ACC_REGNO ? SPE_ACC_REGS \
: (REGNO) == SPEFSCR_REGNO ? SPEFSCR_REGS \
+ : (REGNO) == FRAME_POINTER_REGNUM ? BASE_REGS \
: NO_REGS)
/* The class value for index registers, and the one for base regs. */
#define INDEX_REG_CLASS GENERAL_REGS
#define BASE_REG_CLASS BASE_REGS
-/* Get reg_class from a letter such as appears in the machine description. */
-
-#define REG_CLASS_FROM_LETTER(C) \
- ((C) == 'f' ? FLOAT_REGS \
- : (C) == 'b' ? BASE_REGS \
- : (C) == 'h' ? SPECIAL_REGS \
- : (C) == 'q' ? MQ_REGS \
- : (C) == 'c' ? CTR_REGS \
- : (C) == 'l' ? LINK_REGS \
- : (C) == 'v' ? ALTIVEC_REGS \
- : (C) == 'x' ? CR0_REGS \
- : (C) == 'y' ? CR_REGS \
- : (C) == 'z' ? XER_REGS \
- : NO_REGS)
-
-/* The letters I, J, K, L, M, N, and P in a register constraint string
- can be used to stand for particular ranges of immediate operands.
- This macro defines what the ranges are.
- C is the letter, and VALUE is a constant value.
- Return 1 if VALUE is in the range specified by C.
-
- `I' is a signed 16-bit constant
- `J' is a constant with only the high-order 16 bits nonzero
- `K' is a constant with only the low-order 16 bits nonzero
- `L' is a signed 16-bit constant shifted left 16 bits
- `M' is a constant that is greater than 31
- `N' is a positive constant that is an exact power of two
- `O' is the constant zero
- `P' is a constant whose negation is a signed 16-bit constant */
-
-#define CONST_OK_FOR_LETTER_P(VALUE, C) \
- ( (C) == 'I' ? (unsigned HOST_WIDE_INT) ((VALUE) + 0x8000) < 0x10000 \
- : (C) == 'J' ? ((VALUE) & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0 \
- : (C) == 'K' ? ((VALUE) & (~ (HOST_WIDE_INT) 0xffff)) == 0 \
- : (C) == 'L' ? (((VALUE) & 0xffff) == 0 \
- && ((VALUE) >> 31 == -1 || (VALUE) >> 31 == 0)) \
- : (C) == 'M' ? (VALUE) > 31 \
- : (C) == 'N' ? (VALUE) > 0 && exact_log2 (VALUE) >= 0 \
- : (C) == 'O' ? (VALUE) == 0 \
- : (C) == 'P' ? (unsigned HOST_WIDE_INT) ((- (VALUE)) + 0x8000) < 0x10000 \
- : 0)
-
-/* Similar, but for floating constants, and defining letters G and H.
- Here VALUE is the CONST_DOUBLE rtx itself.
-
- We flag for special constants when we can copy the constant into
- a general register in two insns for DF/DI and one insn for SF.
-
- 'H' is used for DI/DF constants that take 3 insns. */
-
-#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
- ( (C) == 'G' ? (num_insns_constant (VALUE, GET_MODE (VALUE)) \
- == ((GET_MODE (VALUE) == SFmode) ? 1 : 2)) \
- : (C) == 'H' ? (num_insns_constant (VALUE, GET_MODE (VALUE)) == 3) \
- : 0)
-
-/* Optional extra constraints for this machine.
-
- 'Q' means that is a memory operand that is just an offset from a reg.
- 'R' is for AIX TOC entries.
- 'S' is a constant that can be placed into a 64-bit mask operand
- 'T' is a constant that can be placed into a 32-bit mask operand
- 'U' is for V.4 small data references.
- 'W' is a vector constant that can be easily generated (no mem refs).
- 'Y' is a indexed or word-aligned displacement memory operand.
- 't' is for AND masks that can be performed by two rldic{l,r} insns. */
-
-#define EXTRA_CONSTRAINT(OP, C) \
- ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
- : (C) == 'R' ? legitimate_constant_pool_address_p (OP) \
- : (C) == 'S' ? mask64_operand (OP, DImode) \
- : (C) == 'T' ? mask_operand (OP, SImode) \
- : (C) == 'U' ? (DEFAULT_ABI == ABI_V4 \
- && small_data_operand (OP, GET_MODE (OP))) \
- : (C) == 't' ? (mask64_2_operand (OP, DImode) \
- && (fixed_regs[CR0_REGNO] \
- || !logical_operand (OP, DImode)) \
- && !mask64_operand (OP, DImode)) \
- : (C) == 'W' ? (easy_vector_constant (OP, GET_MODE (OP))) \
- : (C) == 'Y' ? (word_offset_memref_operand (OP, GET_MODE (OP))) \
- : 0)
-
-/* Define which constraints are memory constraints.  This tells reload
- that any memory address can be reloaded by copying the
- memory address into a base register if required. */
-
-#define EXTRA_MEMORY_CONSTRAINT(C, STR) \
- ((C) == 'Q' || (C) == 'Y')
-
/* Given an rtx X being reloaded into a reg required to be
in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class.
On the RS/6000, we have to return NO_REGS when we want to reload a
- floating-point CONST_DOUBLE to force it to be copied to memory.
+ floating-point CONST_DOUBLE to force it to be copied to memory.
We also don't want to reload integer values into floating-point
registers if we can at all help it. In fact, this can
- cause reload to abort, if it tries to generate a reload of CTR
+ cause reload to die, if it tries to generate a reload of CTR
into a FP register and discovers it doesn't have the memory location
required.
@@ -1473,27 +1096,20 @@ enum reg_class
*/
#define PREFERRED_RELOAD_CLASS(X,CLASS) \
- (((GET_CODE (X) == CONST_DOUBLE \
- && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) \
- ? NO_REGS \
- : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
- && (CLASS) == NON_SPECIAL_REGS) \
- ? GENERAL_REGS \
- : (CLASS)))
-
-#define DISPARAGE_RELOAD_CLASS(X, CLASS) \
- (GET_CODE (X) == REG \
- && REGNO (X) < FIRST_PSEUDO_REGISTER \
- && SECONDARY_MEMORY_NEEDED (GET_MODE_CLASS (GET_MODE (X)), \
- CLASS, GET_MODE (X)) \
- ? 6 : 0)
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
+ ? NO_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS))
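/* Net effect of the rewrite: any constant (CONSTANT_P) bound for a class
   overlapping FLOAT_REGS now answers NO_REGS, forcing the constant to
   memory, where the old test only caught floating-point CONST_DOUBLEs;
   integer values aimed at NON_SPECIAL_REGS are still narrowed to
   GENERAL_REGS.  */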
/* Return the register class of a scratch register needed to copy IN into
or out of a register in CLASS in MODE. If it can be done directly,
NO_REGS is returned. */
-#define SECONDARY_RELOAD_CLASS(CLASS, MODE, IN) \
- secondary_reload_class (CLASS, MODE, IN)
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,IN) \
+ rs6000_secondary_reload_class (CLASS, MODE, IN)
/* If we are copying between FP or AltiVec registers and anything
else, we need a memory location. */
@@ -1512,20 +1128,23 @@ enum reg_class
#define CLASS_MAX_NREGS(CLASS, MODE) \
(((CLASS) == FLOAT_REGS) \
? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+ : (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS && (MODE) == DFmode) \
+ ? 1 \
: ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+/* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
-/* Return a class of registers that cannot change FROM mode to TO mode. */
-
-#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
- (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) \
- && GET_MODE_SIZE (FROM) >= 8 && GET_MODE_SIZE (TO) >= 8) \
- ? 0 \
- : GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
- ? reg_classes_intersect_p (FLOAT_REGS, CLASS) \
- : (TARGET_SPE && (SPE_VECTOR_MODE (FROM) + SPE_VECTOR_MODE (TO)) == 1) \
- ? reg_classes_intersect_p (GENERAL_REGS, CLASS) \
- : 0)
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? ((GET_MODE_SIZE (FROM) < 8 || GET_MODE_SIZE (TO) < 8 \
+ || TARGET_IEEEQUAD) \
+ && reg_classes_intersect_p (FLOAT_REGS, CLASS)) \
+ : (((TARGET_E500_DOUBLE \
+ && ((((TO) == DFmode) + ((FROM) == DFmode)) == 1 \
+ || (((TO) == DImode) + ((FROM) == DImode)) == 1)) \
+ || (TARGET_SPE \
+ && (SPE_VECTOR_MODE (FROM) + SPE_VECTOR_MODE (TO)) == 1)) \
+ && reg_classes_intersect_p (GENERAL_REGS, CLASS)))
/* Stack layout; function entry, exit and calling. */
@@ -1546,14 +1165,14 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
/* Offsets recorded in opcodes are a multiple of this alignment factor. */
#define DWARF_CIE_DATA_ALIGNMENT (-((int) (TARGET_32BIT ? 4 : 8)))
-/* Define this if the nominal address of the stack frame
+/* Define this to nonzero if the nominal address of the stack frame
is at the high-address end of the local variables;
that is, each additional local variable allocated
goes at a more negative offset in the frame.
On the RS/6000, we grow upwards, from the area after the outgoing
arguments. */
-/* #define FRAME_GROWS_DOWNWARD */
+#define FRAME_GROWS_DOWNWARD (flag_stack_protect != 0)
/* Size of the outgoing register save area */
#define RS6000_REG_SAVE ((DEFAULT_ABI == ABI_AIX \
@@ -1571,16 +1190,9 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
plus_constant (stack_pointer_rtx, \
(TARGET_32BIT ? 20 : 40)))
-/* Size of the V.4 varargs area if needed */
-#define RS6000_VARARGS_AREA 0
-
/* Align an address */
#define RS6000_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
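/* Quick standalone check of the round-up (the alignment must be a power
   of two for the mask trick to work):  */
#include <assert.h>
#define RS6000_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
int
main (void)
{
  assert (RS6000_ALIGN (20, 16) == 32);   /* 20 rounds up to 32 */
  assert (RS6000_ALIGN (32, 16) == 32);   /* already aligned: unchanged */
  return 0;
}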
-/* Size of V.4 varargs area in bytes */
-#define RS6000_VARARGS_SIZE \
- ((GP_ARG_NUM_REG * (TARGET_32BIT ? 4 : 8)) + (FP_ARG_NUM_REG * 8) + 8)
-
/* Offset within stack frame to start allocating local variables at.
If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
first local allocated. Otherwise, it is the offset to the BEGINNING
@@ -1591,10 +1203,11 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
outgoing parameter area. */
#define STARTING_FRAME_OFFSET \
- (RS6000_ALIGN (current_function_outgoing_args_size, \
- TARGET_ALTIVEC ? 16 : 8) \
- + RS6000_VARARGS_AREA \
- + RS6000_SAVE_AREA)
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (current_function_outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + RS6000_SAVE_AREA))
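/* Worked example under assumed numbers (no particular ABI's real
   constants): with 20 bytes of outgoing arguments and AltiVec enabled,
   RS6000_ALIGN (20, 16) == 32, so locals start 32 + RS6000_SAVE_AREA
   bytes up from the stack pointer; with the stack protector on,
   FRAME_GROWS_DOWNWARD holds and the offset is simply 0.  */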
/* Offset from the stack pointer register to an item dynamically
allocated on the stack, e.g., by `alloca'.
@@ -1665,7 +1278,7 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
/* DRAFT_V4_STRUCT_RET defaults off. */
#define DRAFT_V4_STRUCT_RET 0
-/* Let RETURN_IN_MEMORY control what happens. */
+/* Let TARGET_RETURN_IN_MEMORY control what happens. */
#define DEFAULT_PCC_STRUCT_RETURN 0
/* Mode of stack savearea.
@@ -1708,13 +1321,17 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
#define CALL_LONG 0x00000008 /* always call indirect */
#define CALL_LIBCALL 0x00000010 /* libcall */
+/* We don't have prologue and epilogue functions to save/restore
+ everything for most ABIs. */
+#define WORLD_SAVE_P(INFO) 0
+
/* 1 if N is a possible register number for a function value
as seen by the caller.
On RS/6000, this is r3, fp1, and v2 (for AltiVec). */
#define FUNCTION_VALUE_REGNO_P(N) \
((N) == GP_ARG_RETURN \
- || ((N) == FP_ARG_RETURN && TARGET_HARD_FLOAT) \
+ || ((N) == FP_ARG_RETURN && TARGET_HARD_FLOAT && TARGET_FPRS) \
|| ((N) == ALTIVEC_ARG_RETURN && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI))
/* 1 if N is a possible register number for function argument passing.
@@ -1725,24 +1342,8 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
|| ((unsigned) (N) - ALTIVEC_ARG_MIN_REG < ALTIVEC_ARG_NUM_REG \
&& TARGET_ALTIVEC && TARGET_ALTIVEC_ABI) \
|| ((unsigned) (N) - FP_ARG_MIN_REG < FP_ARG_NUM_REG \
- && TARGET_HARD_FLOAT))
+ && TARGET_HARD_FLOAT && TARGET_FPRS))
-/* A C structure for machine-specific, per-function data.
- This is added to the cfun structure. */
-typedef struct machine_function GTY(())
-{
- /* Whether a System V.4 varargs area was created. */
- int sysv_varargs_p;
- /* Flags if __builtin_return_address (n) with n >= 1 was used. */
- int ra_needs_full_frame;
- /* Some local-dynamic symbol. */
- const char *some_ld_name;
- /* Whether the instruction chain has been scanned already. */
- int insn_chain_scanned_p;
- /* Flags if __builtin_return_address (0) was used. */
- int ra_need_lr;
-} machine_function;
-
/* Define a data type for recording info about an argument list
during the scan of that argument list. This data type should
hold all necessary information about the function itself
@@ -1771,6 +1372,9 @@ typedef struct rs6000_args
int stdarg; /* Whether function is a stdarg function. */
int call_cookie; /* Do special things for this call */
int sysv_gregno; /* next available GP register */
+ int intoffset; /* running offset in struct (darwin64) */
+ int use_stack; /* any part of struct on stack (darwin64) */
+ int named; /* false for varargs params */
} CUMULATIVE_ARGS;
/* Initialize a variable CUM of type CUMULATIVE_ARGS
@@ -1796,7 +1400,7 @@ typedef struct rs6000_args
(TYPE is null for libcalls where that information may not be available.) */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
- function_arg_advance (&CUM, MODE, TYPE, NAMED)
+ function_arg_advance (&CUM, MODE, TYPE, NAMED, 0)
/* Determine where to put an argument to a function.
Value is zero to push the argument on the stack,
@@ -1823,22 +1427,6 @@ typedef struct rs6000_args
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
function_arg (&CUM, MODE, TYPE, NAMED)
-/* For an arg passed partly in registers and partly in memory,
- this is the number of registers used.
- For args passed entirely in registers or entirely in memory, zero. */
-
-#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
- function_arg_partial_nregs (&CUM, MODE, TYPE, NAMED)
-
-/* A C expression that indicates when an argument must be passed by
- reference. If nonzero for an argument, a copy of that argument is
- made in memory and a pointer to the argument is passed instead of
- the argument itself. The pointer is passed in whatever way is
- appropriate for passing a pointer to that type. */
-
-#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
- function_arg_pass_by_reference(&CUM, MODE, TYPE, NAMED)
-
/* If defined, a C expression which determines whether, and in which
direction, to pad out an argument with extra space. The value
should be of type `enum direction': either `upward' to pad above
@@ -1858,17 +1446,9 @@ typedef struct rs6000_args
#define EXPAND_BUILTIN_VA_START(valist, nextarg) \
rs6000_va_start (valist, nextarg)
-/* Implement `va_arg'. */
-#define EXPAND_BUILTIN_VA_ARG(valist, type) \
- rs6000_va_arg (valist, type)
-
#define PAD_VARARGS_DOWN \
(FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
-/* Define this macro to be a nonzero value if the location where a function
- argument is passed depends on whether or not it is a named argument. */
-#define STRICT_ARGUMENT_NAMING 1
-
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
@@ -1951,10 +1531,12 @@ typedef struct rs6000_args
of eliminable registers. The "from" register number is given first,
followed by "to". Eliminations of the same "from" register are listed
in order of preference. */
-#define ELIMINABLE_REGS \
-{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
- { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
- { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+#define ELIMINABLE_REGS \
+{{ HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
{ RS6000_PIC_OFFSET_TABLE_REGNUM, RS6000_PIC_OFFSET_TABLE_REGNUM } }
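/* Reading the table: each pair is a (from, to) elimination tried in the
   order listed, so the soft frame pointer (113) is rewritten into the
   stack pointer when possible, else into the hard frame pointer r31.
   Schematically, for a hypothetical 8-byte local,
       fp + 8   ->   sp + <frame size> + 8
   once the final frame layout is known.  */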
/* Given FROM and TO register numbers, say whether this elimination is allowed.
@@ -1994,14 +1576,18 @@ typedef struct rs6000_args
#define REGNO_OK_FOR_INDEX_P(REGNO) \
((REGNO) < FIRST_PSEUDO_REGISTER \
? (REGNO) <= 31 || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
: (reg_renumber[REGNO] >= 0 \
- && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67)))
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
#define REGNO_OK_FOR_BASE_P(REGNO) \
((REGNO) < FIRST_PSEUDO_REGISTER \
? ((REGNO) > 0 && (REGNO) <= 31) || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
: (reg_renumber[REGNO] > 0 \
- && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67)))
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
/* Maximum number of registers that can appear in a valid memory address. */
@@ -2030,6 +1616,11 @@ typedef struct rs6000_args
|| easy_vector_constant (X, GET_MODE (X))) \
&& !rs6000_tls_referenced_p (X))
+#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)
+#define EASY_VECTOR_15_ADD_SELF(n) (!EASY_VECTOR_15((n)) \
+ && EASY_VECTOR_15((n) >> 1) \
+ && ((n) & 1) == 0)
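/* Sanity walk-through of the two predicates: 12 satisfies EASY_VECTOR_15;
   24 does not, but 24 >> 1 == 12 does and 24 is even, so
   EASY_VECTOR_15_ADD_SELF holds (splat 12, then add the vector to
   itself); 25 fails both tests (odd), as does 32 (32 >> 1 == 16 is out
   of range).  */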
+
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
and check its validity for a certain class.
We have two alternate definitions for each of them.
@@ -2052,16 +1643,14 @@ typedef struct rs6000_args
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg in the non-strict case. */
#define INT_REG_OK_FOR_INDEX_P(X, STRICT) \
- ((! (STRICT) \
- && (REGNO (X) <= 31 \
- || REGNO (X) == ARG_POINTER_REGNUM \
- || REGNO (X) >= FIRST_PSEUDO_REGISTER)) \
- || ((STRICT) && REGNO_OK_FOR_INDEX_P (REGNO (X))))
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_INDEX_P (REGNO (X)))
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg in the non-strict case. */
#define INT_REG_OK_FOR_BASE_P(X, STRICT) \
- (REGNO (X) > 0 && INT_REG_OK_FOR_INDEX_P (X, (STRICT)))
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_BASE_P (REGNO (X)))
#define REG_OK_FOR_INDEX_P(X) INT_REG_OK_FOR_INDEX_P (X, REG_OK_STRICT_FLAG)
#define REG_OK_FOR_BASE_P(X) INT_REG_OK_FOR_BASE_P (X, REG_OK_STRICT_FLAG)
@@ -2071,7 +1660,7 @@ typedef struct rs6000_args
The MODE argument is the machine mode for the MEM expression
that wants to use this address.
- On the RS/6000, there are four valid address: a SYMBOL_REF that
+ On the RS/6000, there are four valid addresses: a SYMBOL_REF that
refers to a constant pool entry of an address (or the sum of it
plus a constant), a short (16-bit signed) constant plus a register,
the sum of two registers, or a register indirect, possibly with an
@@ -2125,9 +1714,9 @@ typedef struct rs6000_args
operand. If we find one, push the reload and jump to WIN. This
macro is used in only one place: `find_reloads_address' in reload.c.
- Implemented on rs6000 by rs6000_legitimize_reload_address.
+ Implemented on rs6000 by rs6000_legitimize_reload_address.
Note that (X) is evaluated twice; this is safe in current usage. */
-
+
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
do { \
int win; \
@@ -2165,23 +1754,6 @@ do { \
/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */
-/* By generating position-independent code, when two different
- programs (A and B) share a common library (libC.a), the text of
- the library can be shared whether or not the library is linked at
- the same address for both programs. In some of these
- environments, position-independent code requires not only the use
- of different addressing modes, but also special code to enable the
- use of these addressing modes.
-
- The `FINALIZE_PIC' macro serves as a hook to emit these special
- codes once the function is being compiled into assembly code, but
- not before. (It is not done before, because in the case of
- compiling an inline function, it would lead to multiple PIC
- prologues being included in functions which used inline functions
- and were compiled to assembly language.) */
-
-/* #define FINALIZE_PIC */
-
/* A C expression that is nonzero if X is a legitimate immediate
operand on the target machine when generating position independent
code. You can assume that X satisfies `CONSTANT_P', so you need
@@ -2215,6 +1787,12 @@ do { \
/* #define FIXUNS_TRUNC_LIKE_FIX_TRUNC */
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. */
+
+/* Allow pairs of registers to be used, which is the intent of the default. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_POWERPC64 ? TImode : DImode)
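/* Concretely, this evaluates to GET_MODE_BITSIZE (TImode) == 128 on
   PowerPC64 (a register pair, per the comment above) and to
   GET_MODE_BITSIZE (DImode) == 64 everywhere else.  */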
+
/* Max number of bytes we can move from memory to memory
in one reasonably fast instruction. */
#define MOVE_MAX (! TARGET_POWERPC64 ? 4 : 8)
@@ -2232,7 +1810,7 @@ do { \
/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
will either zero-extend or sign-extend. The value of this macro should
be the code that says which one of the two operations is implicitly
- done, NIL if none. */
+ done, UNKNOWN if none. */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
/* Define if loading short immediate values into registers sign extends. */
@@ -2290,9 +1868,9 @@ do { \
comparison. CCmode should be used in all other cases. */
#define SELECT_CC_MODE(OP,X,Y) \
- (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT ? CCFPmode \
+ (SCALAR_FLOAT_MODE_P (GET_MODE (X)) ? CCFPmode \
: (OP) == GTU || (OP) == LTU || (OP) == GEU || (OP) == LEU ? CCUNSmode \
- : (((OP) == EQ || (OP) == NE) && GET_RTX_CLASS (GET_CODE (X)) == '<' \
+ : (((OP) == EQ || (OP) == NE) && COMPARISON_P (X) \
? CCEQmode : CCmode))
/* Can the condition code MODE be safely reversed? This is safe in
@@ -2317,10 +1895,6 @@ extern int rs6000_compare_fp_p;
the end of the line. */
#define ASM_COMMENT_START " #"
-/* Implicit library calls should use memcpy, not bcopy, etc. */
-
-#define TARGET_MEM_FUNCTIONS
-
/* Flag to say the TOC is initialized */
extern int toc_initialized;
@@ -2349,9 +1923,9 @@ extern int toc_initialized;
do \
{ \
fputs ("\t.weak\t", (FILE)); \
- RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
- && DEFAULT_ABI == ABI_AIX) \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
{ \
if (TARGET_XCOFF) \
fputs ("[DS]", (FILE)); \
@@ -2363,7 +1937,7 @@ extern int toc_initialized;
{ \
ASM_OUTPUT_DEF ((FILE), (NAME), (VAL)); \
if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
- && DEFAULT_ABI == ABI_AIX) \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
{ \
fputs ("\t.set\t.", (FILE)); \
RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
@@ -2376,6 +1950,26 @@ extern int toc_initialized;
while (0)
#endif
+#if HAVE_GAS_WEAKREF
+#define ASM_OUTPUT_WEAKREF(FILE, DECL, NAME, VALUE) \
+ do \
+ { \
+ fputs ("\t.weakref\t", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ fputs ("\n\t.weakref\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", .", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ } \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
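/* Illustrative output for a hypothetical weakref `foo' aliasing `bar':
   the macro emits
        .weakref foo, bar
   and, for a FUNCTION_DECL under the AIX ABI with dot symbols, also
        .weakref .foo, .bar
   so the code entry point gets the same weak alias as the descriptor.  */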
+
/* This implements the `alias' attribute. */
#undef ASM_OUTPUT_DEF_FROM_DECLS
#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
@@ -2384,7 +1978,7 @@ extern int toc_initialized;
const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
const char *name = IDENTIFIER_POINTER (TARGET); \
if (TREE_CODE (DECL) == FUNCTION_DECL \
- && DEFAULT_ABI == ABI_AIX) \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
{ \
if (TREE_PUBLIC (DECL)) \
{ \
@@ -2548,6 +2142,7 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
&rs6000_reg_names[110][0], /* vscr */ \
&rs6000_reg_names[111][0], /* spe_acc */ \
&rs6000_reg_names[112][0], /* spefscr */ \
+ &rs6000_reg_names[113][0], /* sfp */ \
}
/* Table of additional register names to use in user input. */
@@ -2635,79 +2230,6 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
-/* Define the codes that are matched by predicates in rs6000.c. */
-
-#define PREDICATE_CODES \
- {"any_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
- LABEL_REF, SUBREG, REG, MEM, PARALLEL}}, \
- {"zero_constant", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
- LABEL_REF, SUBREG, REG, MEM}}, \
- {"short_cint_operand", {CONST_INT}}, \
- {"u_short_cint_operand", {CONST_INT}}, \
- {"non_short_cint_operand", {CONST_INT}}, \
- {"exact_log2_cint_operand", {CONST_INT}}, \
- {"gpc_reg_operand", {SUBREG, REG}}, \
- {"cc_reg_operand", {SUBREG, REG}}, \
- {"cc_reg_not_cr0_operand", {SUBREG, REG}}, \
- {"reg_or_short_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_neg_short_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_aligned_short_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_u_short_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_arith_cint_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_add_cint64_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_sub_cint64_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_logical_cint_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
- {"got_operand", {SYMBOL_REF, CONST, LABEL_REF}}, \
- {"got_no_const_operand", {SYMBOL_REF, LABEL_REF}}, \
- {"easy_fp_constant", {CONST_DOUBLE}}, \
- {"easy_vector_constant", {CONST_VECTOR}}, \
- {"easy_vector_constant_add_self", {CONST_VECTOR}}, \
- {"zero_fp_constant", {CONST_DOUBLE}}, \
- {"reg_or_mem_operand", {SUBREG, MEM, REG}}, \
- {"lwa_operand", {SUBREG, MEM, REG}}, \
- {"volatile_mem_operand", {MEM}}, \
- {"offsettable_mem_operand", {MEM}}, \
- {"mem_or_easy_const_operand", {SUBREG, MEM, CONST_DOUBLE}}, \
- {"add_operand", {SUBREG, REG, CONST_INT}}, \
- {"non_add_cint_operand", {CONST_INT}}, \
- {"and_operand", {SUBREG, REG, CONST_INT}}, \
- {"and64_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
- {"and64_2_operand", {SUBREG, REG, CONST_INT}}, \
- {"logical_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
- {"non_logical_cint_operand", {CONST_INT, CONST_DOUBLE}}, \
- {"mask_operand", {CONST_INT}}, \
- {"mask_operand_wrap", {CONST_INT}}, \
- {"mask64_operand", {CONST_INT}}, \
- {"mask64_2_operand", {CONST_INT}}, \
- {"count_register_operand", {REG}}, \
- {"xer_operand", {REG}}, \
- {"symbol_ref_operand", {SYMBOL_REF}}, \
- {"rs6000_tls_symbol_ref", {SYMBOL_REF}}, \
- {"call_operand", {SYMBOL_REF, REG}}, \
- {"current_file_function_operand", {SYMBOL_REF}}, \
- {"input_operand", {SUBREG, MEM, REG, CONST_INT, \
- CONST_DOUBLE, SYMBOL_REF}}, \
- {"load_multiple_operation", {PARALLEL}}, \
- {"store_multiple_operation", {PARALLEL}}, \
- {"vrsave_operation", {PARALLEL}}, \
- {"branch_comparison_operator", {EQ, NE, LE, LT, GE, \
- GT, LEU, LTU, GEU, GTU, \
- UNORDERED, ORDERED, \
- UNGE, UNLE }}, \
- {"branch_positive_comparison_operator", {EQ, LT, GT, LTU, GTU, \
- UNORDERED }}, \
- {"scc_comparison_operator", {EQ, NE, LE, LT, GE, \
- GT, LEU, LTU, GEU, GTU, \
- UNORDERED, ORDERED, \
- UNGE, UNLE }}, \
- {"trap_comparison_operator", {EQ, NE, LE, LT, GE, \
- GT, LEU, LTU, GEU, GTU}}, \
- {"boolean_operator", {AND, IOR, XOR}}, \
- {"boolean_or_operator", {IOR, XOR}}, \
- {"altivec_register_operand", {REG}}, \
- {"min_max_operator", {SMIN, SMAX, UMIN, UMAX}},
-
/* uncomment for disabling the corresponding default options */
/* #define MACHINE_no_sched_interblock */
/* #define MACHINE_no_sched_speculative */
@@ -2923,7 +2445,237 @@ enum rs6000_builtins
ALTIVEC_BUILTIN_ABS_V4SF,
ALTIVEC_BUILTIN_ABS_V8HI,
ALTIVEC_BUILTIN_ABS_V16QI,
- ALTIVEC_BUILTIN_COMPILETIME_ERROR,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ ALTIVEC_BUILTIN_MASK_FOR_STORE,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF,
+
+ /* Altivec overloaded builtins. */
+ ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_OVERLOADED_FIRST = ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_VCMPGT_P,
+ ALTIVEC_BUILTIN_VCMPGE_P,
+ ALTIVEC_BUILTIN_VEC_ABS,
+ ALTIVEC_BUILTIN_VEC_ABSS,
+ ALTIVEC_BUILTIN_VEC_ADD,
+ ALTIVEC_BUILTIN_VEC_ADDC,
+ ALTIVEC_BUILTIN_VEC_ADDS,
+ ALTIVEC_BUILTIN_VEC_AND,
+ ALTIVEC_BUILTIN_VEC_ANDC,
+ ALTIVEC_BUILTIN_VEC_AVG,
+ ALTIVEC_BUILTIN_VEC_CEIL,
+ ALTIVEC_BUILTIN_VEC_CMPB,
+ ALTIVEC_BUILTIN_VEC_CMPEQ,
+ ALTIVEC_BUILTIN_VEC_CMPEQUB,
+ ALTIVEC_BUILTIN_VEC_CMPEQUH,
+ ALTIVEC_BUILTIN_VEC_CMPEQUW,
+ ALTIVEC_BUILTIN_VEC_CMPGE,
+ ALTIVEC_BUILTIN_VEC_CMPGT,
+ ALTIVEC_BUILTIN_VEC_CMPLE,
+ ALTIVEC_BUILTIN_VEC_CMPLT,
+ ALTIVEC_BUILTIN_VEC_CTF,
+ ALTIVEC_BUILTIN_VEC_CTS,
+ ALTIVEC_BUILTIN_VEC_CTU,
+ ALTIVEC_BUILTIN_VEC_DST,
+ ALTIVEC_BUILTIN_VEC_DSTST,
+ ALTIVEC_BUILTIN_VEC_DSTSTT,
+ ALTIVEC_BUILTIN_VEC_DSTT,
+ ALTIVEC_BUILTIN_VEC_EXPTE,
+ ALTIVEC_BUILTIN_VEC_FLOOR,
+ ALTIVEC_BUILTIN_VEC_LD,
+ ALTIVEC_BUILTIN_VEC_LDE,
+ ALTIVEC_BUILTIN_VEC_LDL,
+ ALTIVEC_BUILTIN_VEC_LOGE,
+ ALTIVEC_BUILTIN_VEC_LVEBX,
+ ALTIVEC_BUILTIN_VEC_LVEHX,
+ ALTIVEC_BUILTIN_VEC_LVEWX,
+ ALTIVEC_BUILTIN_VEC_LVSL,
+ ALTIVEC_BUILTIN_VEC_LVSR,
+ ALTIVEC_BUILTIN_VEC_MADD,
+ ALTIVEC_BUILTIN_VEC_MADDS,
+ ALTIVEC_BUILTIN_VEC_MAX,
+ ALTIVEC_BUILTIN_VEC_MERGEH,
+ ALTIVEC_BUILTIN_VEC_MERGEL,
+ ALTIVEC_BUILTIN_VEC_MIN,
+ ALTIVEC_BUILTIN_VEC_MLADD,
+ ALTIVEC_BUILTIN_VEC_MPERM,
+ ALTIVEC_BUILTIN_VEC_MRADDS,
+ ALTIVEC_BUILTIN_VEC_MRGHB,
+ ALTIVEC_BUILTIN_VEC_MRGHH,
+ ALTIVEC_BUILTIN_VEC_MRGHW,
+ ALTIVEC_BUILTIN_VEC_MRGLB,
+ ALTIVEC_BUILTIN_VEC_MRGLH,
+ ALTIVEC_BUILTIN_VEC_MRGLW,
+ ALTIVEC_BUILTIN_VEC_MSUM,
+ ALTIVEC_BUILTIN_VEC_MSUMS,
+ ALTIVEC_BUILTIN_VEC_MTVSCR,
+ ALTIVEC_BUILTIN_VEC_MULE,
+ ALTIVEC_BUILTIN_VEC_MULO,
+ ALTIVEC_BUILTIN_VEC_NMSUB,
+ ALTIVEC_BUILTIN_VEC_NOR,
+ ALTIVEC_BUILTIN_VEC_OR,
+ ALTIVEC_BUILTIN_VEC_PACK,
+ ALTIVEC_BUILTIN_VEC_PACKPX,
+ ALTIVEC_BUILTIN_VEC_PACKS,
+ ALTIVEC_BUILTIN_VEC_PACKSU,
+ ALTIVEC_BUILTIN_VEC_PERM,
+ ALTIVEC_BUILTIN_VEC_RE,
+ ALTIVEC_BUILTIN_VEC_RL,
+ ALTIVEC_BUILTIN_VEC_ROUND,
+ ALTIVEC_BUILTIN_VEC_RSQRTE,
+ ALTIVEC_BUILTIN_VEC_SEL,
+ ALTIVEC_BUILTIN_VEC_SL,
+ ALTIVEC_BUILTIN_VEC_SLD,
+ ALTIVEC_BUILTIN_VEC_SLL,
+ ALTIVEC_BUILTIN_VEC_SLO,
+ ALTIVEC_BUILTIN_VEC_SPLAT,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S8,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U8,
+ ALTIVEC_BUILTIN_VEC_SPLTB,
+ ALTIVEC_BUILTIN_VEC_SPLTH,
+ ALTIVEC_BUILTIN_VEC_SPLTW,
+ ALTIVEC_BUILTIN_VEC_SR,
+ ALTIVEC_BUILTIN_VEC_SRA,
+ ALTIVEC_BUILTIN_VEC_SRL,
+ ALTIVEC_BUILTIN_VEC_SRO,
+ ALTIVEC_BUILTIN_VEC_ST,
+ ALTIVEC_BUILTIN_VEC_STE,
+ ALTIVEC_BUILTIN_VEC_STL,
+ ALTIVEC_BUILTIN_VEC_STVEBX,
+ ALTIVEC_BUILTIN_VEC_STVEHX,
+ ALTIVEC_BUILTIN_VEC_STVEWX,
+ ALTIVEC_BUILTIN_VEC_SUB,
+ ALTIVEC_BUILTIN_VEC_SUBC,
+ ALTIVEC_BUILTIN_VEC_SUBS,
+ ALTIVEC_BUILTIN_VEC_SUM2S,
+ ALTIVEC_BUILTIN_VEC_SUM4S,
+ ALTIVEC_BUILTIN_VEC_SUMS,
+ ALTIVEC_BUILTIN_VEC_TRUNC,
+ ALTIVEC_BUILTIN_VEC_UNPACKH,
+ ALTIVEC_BUILTIN_VEC_UNPACKL,
+ ALTIVEC_BUILTIN_VEC_VADDFP,
+ ALTIVEC_BUILTIN_VEC_VADDSBS,
+ ALTIVEC_BUILTIN_VEC_VADDSHS,
+ ALTIVEC_BUILTIN_VEC_VADDSWS,
+ ALTIVEC_BUILTIN_VEC_VADDUBM,
+ ALTIVEC_BUILTIN_VEC_VADDUBS,
+ ALTIVEC_BUILTIN_VEC_VADDUHM,
+ ALTIVEC_BUILTIN_VEC_VADDUHS,
+ ALTIVEC_BUILTIN_VEC_VADDUWM,
+ ALTIVEC_BUILTIN_VEC_VADDUWS,
+ ALTIVEC_BUILTIN_VEC_VAVGSB,
+ ALTIVEC_BUILTIN_VEC_VAVGSH,
+ ALTIVEC_BUILTIN_VEC_VAVGSW,
+ ALTIVEC_BUILTIN_VEC_VAVGUB,
+ ALTIVEC_BUILTIN_VEC_VAVGUH,
+ ALTIVEC_BUILTIN_VEC_VAVGUW,
+ ALTIVEC_BUILTIN_VEC_VCFSX,
+ ALTIVEC_BUILTIN_VEC_VCFUX,
+ ALTIVEC_BUILTIN_VEC_VCMPEQFP,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUB,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUH,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUW,
+ ALTIVEC_BUILTIN_VEC_VMAXFP,
+ ALTIVEC_BUILTIN_VEC_VMAXSB,
+ ALTIVEC_BUILTIN_VEC_VMAXSH,
+ ALTIVEC_BUILTIN_VEC_VMAXSW,
+ ALTIVEC_BUILTIN_VEC_VMAXUB,
+ ALTIVEC_BUILTIN_VEC_VMAXUH,
+ ALTIVEC_BUILTIN_VEC_VMAXUW,
+ ALTIVEC_BUILTIN_VEC_VMINFP,
+ ALTIVEC_BUILTIN_VEC_VMINSB,
+ ALTIVEC_BUILTIN_VEC_VMINSH,
+ ALTIVEC_BUILTIN_VEC_VMINSW,
+ ALTIVEC_BUILTIN_VEC_VMINUB,
+ ALTIVEC_BUILTIN_VEC_VMINUH,
+ ALTIVEC_BUILTIN_VEC_VMINUW,
+ ALTIVEC_BUILTIN_VEC_VMRGHB,
+ ALTIVEC_BUILTIN_VEC_VMRGHH,
+ ALTIVEC_BUILTIN_VEC_VMRGHW,
+ ALTIVEC_BUILTIN_VEC_VMRGLB,
+ ALTIVEC_BUILTIN_VEC_VMRGLH,
+ ALTIVEC_BUILTIN_VEC_VMRGLW,
+ ALTIVEC_BUILTIN_VEC_VMSUMMBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHS,
+ ALTIVEC_BUILTIN_VEC_VMSUMUBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHS,
+ ALTIVEC_BUILTIN_VEC_VMULESB,
+ ALTIVEC_BUILTIN_VEC_VMULESH,
+ ALTIVEC_BUILTIN_VEC_VMULEUB,
+ ALTIVEC_BUILTIN_VEC_VMULEUH,
+ ALTIVEC_BUILTIN_VEC_VMULOSB,
+ ALTIVEC_BUILTIN_VEC_VMULOSH,
+ ALTIVEC_BUILTIN_VEC_VMULOUB,
+ ALTIVEC_BUILTIN_VEC_VMULOUH,
+ ALTIVEC_BUILTIN_VEC_VPKSHSS,
+ ALTIVEC_BUILTIN_VEC_VPKSHUS,
+ ALTIVEC_BUILTIN_VEC_VPKSWSS,
+ ALTIVEC_BUILTIN_VEC_VPKSWUS,
+ ALTIVEC_BUILTIN_VEC_VPKUHUM,
+ ALTIVEC_BUILTIN_VEC_VPKUHUS,
+ ALTIVEC_BUILTIN_VEC_VPKUWUM,
+ ALTIVEC_BUILTIN_VEC_VPKUWUS,
+ ALTIVEC_BUILTIN_VEC_VRLB,
+ ALTIVEC_BUILTIN_VEC_VRLH,
+ ALTIVEC_BUILTIN_VEC_VRLW,
+ ALTIVEC_BUILTIN_VEC_VSLB,
+ ALTIVEC_BUILTIN_VEC_VSLH,
+ ALTIVEC_BUILTIN_VEC_VSLW,
+ ALTIVEC_BUILTIN_VEC_VSPLTB,
+ ALTIVEC_BUILTIN_VEC_VSPLTH,
+ ALTIVEC_BUILTIN_VEC_VSPLTW,
+ ALTIVEC_BUILTIN_VEC_VSRAB,
+ ALTIVEC_BUILTIN_VEC_VSRAH,
+ ALTIVEC_BUILTIN_VEC_VSRAW,
+ ALTIVEC_BUILTIN_VEC_VSRB,
+ ALTIVEC_BUILTIN_VEC_VSRH,
+ ALTIVEC_BUILTIN_VEC_VSRW,
+ ALTIVEC_BUILTIN_VEC_VSUBFP,
+ ALTIVEC_BUILTIN_VEC_VSUBSBS,
+ ALTIVEC_BUILTIN_VEC_VSUBSHS,
+ ALTIVEC_BUILTIN_VEC_VSUBSWS,
+ ALTIVEC_BUILTIN_VEC_VSUBUBM,
+ ALTIVEC_BUILTIN_VEC_VSUBUBS,
+ ALTIVEC_BUILTIN_VEC_VSUBUHM,
+ ALTIVEC_BUILTIN_VEC_VSUBUHS,
+ ALTIVEC_BUILTIN_VEC_VSUBUWM,
+ ALTIVEC_BUILTIN_VEC_VSUBUWS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SBS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SHS,
+ ALTIVEC_BUILTIN_VEC_VSUM4UBS,
+ ALTIVEC_BUILTIN_VEC_VUPKHPX,
+ ALTIVEC_BUILTIN_VEC_VUPKHSB,
+ ALTIVEC_BUILTIN_VEC_VUPKHSH,
+ ALTIVEC_BUILTIN_VEC_VUPKLPX,
+ ALTIVEC_BUILTIN_VEC_VUPKLSB,
+ ALTIVEC_BUILTIN_VEC_VUPKLSH,
+ ALTIVEC_BUILTIN_VEC_XOR,
+ ALTIVEC_BUILTIN_VEC_STEP,
+ ALTIVEC_BUILTIN_OVERLOADED_LAST = ALTIVEC_BUILTIN_VEC_STEP,
+
/* SPE builtins. */
SPE_BUILTIN_EVADDW,
SPE_BUILTIN_EVAND,
@@ -3157,5 +2909,84 @@ enum rs6000_builtins
SPE_BUILTIN_EVMWHGUMIAN,
SPE_BUILTIN_MTSPEFSCR,
SPE_BUILTIN_MFSPEFSCR,
- SPE_BUILTIN_BRINC
+ SPE_BUILTIN_BRINC,
+
+ RS6000_BUILTIN_COUNT
};
+
+enum rs6000_builtin_type_index
+{
+ RS6000_BTI_NOT_OPAQUE,
+ RS6000_BTI_opaque_V2SI,
+ RS6000_BTI_opaque_V2SF,
+ RS6000_BTI_opaque_p_V2SI,
+ RS6000_BTI_opaque_V4SI,
+ RS6000_BTI_V16QI,
+ RS6000_BTI_V2SI,
+ RS6000_BTI_V2SF,
+ RS6000_BTI_V4HI,
+ RS6000_BTI_V4SI,
+ RS6000_BTI_V4SF,
+ RS6000_BTI_V8HI,
+ RS6000_BTI_unsigned_V16QI,
+ RS6000_BTI_unsigned_V8HI,
+ RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_bool_char, /* __bool char */
+ RS6000_BTI_bool_short, /* __bool short */
+ RS6000_BTI_bool_int, /* __bool int */
+ RS6000_BTI_pixel, /* __pixel */
+ RS6000_BTI_bool_V16QI, /* __vector __bool char */
+ RS6000_BTI_bool_V8HI, /* __vector __bool short */
+ RS6000_BTI_bool_V4SI, /* __vector __bool int */
+ RS6000_BTI_pixel_V8HI, /* __vector __pixel */
+ RS6000_BTI_long, /* long_integer_type_node */
+ RS6000_BTI_unsigned_long, /* long_unsigned_type_node */
+ RS6000_BTI_INTQI, /* intQI_type_node */
+ RS6000_BTI_UINTQI, /* unsigned_intQI_type_node */
+ RS6000_BTI_INTHI, /* intHI_type_node */
+ RS6000_BTI_UINTHI, /* unsigned_intHI_type_node */
+ RS6000_BTI_INTSI, /* intSI_type_node */
+ RS6000_BTI_UINTSI, /* unsigned_intSI_type_node */
+ RS6000_BTI_float, /* float_type_node */
+ RS6000_BTI_void, /* void_type_node */
+ RS6000_BTI_MAX
+};
+
+
+#define opaque_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SI])
+#define opaque_V2SF_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SF])
+#define opaque_p_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_p_V2SI])
+#define opaque_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V4SI])
+#define V16QI_type_node (rs6000_builtin_types[RS6000_BTI_V16QI])
+#define V2SI_type_node (rs6000_builtin_types[RS6000_BTI_V2SI])
+#define V2SF_type_node (rs6000_builtin_types[RS6000_BTI_V2SF])
+#define V4HI_type_node (rs6000_builtin_types[RS6000_BTI_V4HI])
+#define V4SI_type_node (rs6000_builtin_types[RS6000_BTI_V4SI])
+#define V4SF_type_node (rs6000_builtin_types[RS6000_BTI_V4SF])
+#define V8HI_type_node (rs6000_builtin_types[RS6000_BTI_V8HI])
+#define unsigned_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V16QI])
+#define unsigned_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V8HI])
+#define unsigned_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V4SI])
+#define bool_char_type_node (rs6000_builtin_types[RS6000_BTI_bool_char])
+#define bool_short_type_node (rs6000_builtin_types[RS6000_BTI_bool_short])
+#define bool_int_type_node (rs6000_builtin_types[RS6000_BTI_bool_int])
+#define pixel_type_node (rs6000_builtin_types[RS6000_BTI_pixel])
+#define bool_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V16QI])
+#define bool_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V8HI])
+#define bool_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V4SI])
+#define pixel_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_pixel_V8HI])
+
+#define long_integer_type_internal_node (rs6000_builtin_types[RS6000_BTI_long])
+#define long_unsigned_type_internal_node (rs6000_builtin_types[RS6000_BTI_unsigned_long])
+#define intQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTQI])
+#define uintQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTQI])
+#define intHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTHI])
+#define uintHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTHI])
+#define intSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTSI])
+#define uintSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTSI])
+#define float_type_internal_node (rs6000_builtin_types[RS6000_BTI_float])
+#define void_type_internal_node (rs6000_builtin_types[RS6000_BTI_void])
+
+extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX];
+extern GTY(()) tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+
diff --git a/contrib/gcc/config/rs6000/rs6000.md b/contrib/gcc/config/rs6000/rs6000.md
index 0e242d4..21cc207 100644
--- a/contrib/gcc/config/rs6000/rs6000.md
+++ b/contrib/gcc/config/rs6000/rs6000.md
@@ -1,6 +1,7 @@
;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
;; Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-;; 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+;; 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+;; Free Software Foundation, Inc.
;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
;; This file is part of GCC.
@@ -17,8 +18,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
@@ -34,6 +35,10 @@
(UNSPEC_MOVSI_GOT 8)
(UNSPEC_MV_CR_OV 9) ; move_from_CR_ov_bit
(UNSPEC_FCTIWZ 10)
+ (UNSPEC_FRIM 11)
+ (UNSPEC_FRIN 12)
+ (UNSPEC_FRIP 13)
+ (UNSPEC_FRIZ 14)
(UNSPEC_LD_MPIC 15) ; load_macho_picbase
(UNSPEC_MPIC_CORRECT 16) ; macho_correct_pic
(UNSPEC_TLSGD 17)
@@ -50,7 +55,23 @@
(UNSPEC_TLSGOTTPREL 28)
(UNSPEC_TLSTLS 29)
(UNSPEC_FIX_TRUNC_TF 30) ; fadd, rounding towards zero
- (UNSPEC_MV_CR_EQ 31) ; move_from_CR_eq_bit
+ (UNSPEC_MV_CR_GT 31) ; move_from_CR_gt_bit
+ (UNSPEC_STFIWX 32)
+ (UNSPEC_POPCNTB 33)
+ (UNSPEC_FRES 34)
+ (UNSPEC_SP_SET 35)
+ (UNSPEC_SP_TEST 36)
+ (UNSPEC_SYNC 37)
+ (UNSPEC_LWSYNC 38)
+ (UNSPEC_ISYNC 39)
+ (UNSPEC_SYNC_OP 40)
+ (UNSPEC_ATOMIC 41)
+ (UNSPEC_CMPXCHG 42)
+ (UNSPEC_XCHG 43)
+ (UNSPEC_AND 44)
+ (UNSPEC_DLMZB 45)
+ (UNSPEC_DLMZB_CR 46)
+ (UNSPEC_DLMZB_STRLEN 47)
])
;;
@@ -59,16 +80,18 @@
(define_constants
[(UNSPECV_BLOCK 0)
+ (UNSPECV_LL 1) ; load-locked
+ (UNSPECV_SC 2) ; store-conditional
(UNSPECV_EH_RR 9) ; eh_reg_restore
])
;; Define an insn type attribute. This is used in function unit delay
;; computations.
-(define_attr "type" "integer,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv"
+(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c"
(const_string "integer"))
;; Length (in bytes).
-; '(pc)' in the following doesn't include the instruction itself; it is
+; '(pc)' in the following doesn't include the instruction itself; it is
; calculated as if the instruction had zero size.
(define_attr "length" ""
(if_then_else (eq_attr "type" "branch")
@@ -102,40 +125,81 @@
(include "power4.md")
(include "power5.md")
+(include "predicates.md")
+(include "constraints.md")
+
+(include "darwin.md")
+
+
+;; Mode macros
+
+; This mode macro allows :GPR to be used to indicate the allowable size
+; of whole values in GPRs.
+(define_mode_macro GPR [SI (DI "TARGET_POWERPC64")])
+
+; Any supported integer mode.
+(define_mode_macro INT [QI HI SI DI TI])
+
+; Any supported integer mode that fits in one register.
+(define_mode_macro INT1 [QI HI SI (DI "TARGET_POWERPC64")])
+
+; extend modes for DImode
+(define_mode_macro QHSI [QI HI SI])
+
+; SImode or DImode, even if DImode doesn't fit in GPRs.
+(define_mode_macro SDI [SI DI])
+
+; The size of a pointer. Also, the size of the value that a record-condition
+; (one with a '.') will compare.
+(define_mode_macro P [(SI "TARGET_32BIT") (DI "TARGET_64BIT")])
+
+; Any hardware-supported floating-point mode
+(define_mode_macro FP [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)")
+ (TF "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128")])
+
+; Various instructions that come in SI and DI forms.
+; A generic w/d attribute, for things like cmpw/cmpd.
+(define_mode_attr wd [(QI "b") (HI "h") (SI "w") (DI "d")])
+
+; DImode bits
+(define_mode_attr dbits [(QI "56") (HI "48") (SI "32")])
+
;; Start with fixed-point load and store insns. Here we put only the more
;; complex forms. Basic data transfer is done later.
-(define_expand "zero_extendqidi2"
+(define_expand "zero_extend<mode>di2"
[(set (match_operand:DI 0 "gpc_reg_operand" "")
- (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "")))]
"TARGET_POWERPC64"
"")
-(define_insn ""
+(define_insn "*zero_extend<mode>di2_internal1"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (zero_extend:DI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ (zero_extend:DI (match_operand:QHSI 1 "reg_or_mem_operand" "m,r")))]
"TARGET_POWERPC64"
"@
- lbz%U1%X1 %0,%1
- rldicl %0,%1,0,56"
+ l<wd>z%U1%X1 %0,%1
+ rldicl %0,%1,0,<dbits>"
[(set_attr "type" "load,*")])
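;; Worked expansion: for QImode the mode macros above reproduce exactly
;; what the old hand-written zero_extendqidi2 pattern emitted -- <wd>
;; becomes "b" and <dbits> becomes "56", giving
;;     lbz%U1%X1 %0,%1
;;     rldicl %0,%1,0,56
;; with lhz/48 for HImode and lwz/32 for SImode (the hand-written
;; patterns deleted further down).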
-(define_insn ""
+(define_insn "*zero_extend<mode>di2_internal2"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:DI 2 "=r,r"))]
"TARGET_64BIT"
"@
- rldicl. %2,%1,0,56
+ rldicl. %2,%1,0,<dbits>
#"
[(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
(const_int 0)))
(clobber (match_scratch:DI 2 ""))]
"TARGET_POWERPC64 && reload_completed"
@@ -146,22 +210,22 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "*zero_extend<mode>di2_internal3"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
(zero_extend:DI (match_dup 1)))]
"TARGET_64BIT"
"@
- rldicl. %0,%1,0,56
+ rldicl. %0,%1,0,<dbits>
#"
[(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
(const_int 0)))
(set (match_operand:DI 0 "gpc_reg_operand" "")
(zero_extend:DI (match_dup 1)))]
@@ -231,73 +295,6 @@
(const_int 0)))]
"")
-(define_expand "zero_extendhidi2"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
- "TARGET_POWERPC64"
- "")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (zero_extend:DI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
- "TARGET_POWERPC64"
- "@
- lhz%U1%X1 %0,%1
- rldicl %0,%1,0,48"
- [(set_attr "type" "load,*")])
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (clobber (match_scratch:DI 2 "=r,r"))]
- "TARGET_64BIT"
- "@
- rldicl. %2,%1,0,48
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 2 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 2)
- (zero_extend:DI (match_dup 1)))
- (set (match_dup 0)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (zero_extend:DI (match_dup 1)))]
- "TARGET_64BIT"
- "@
- rldicl. %0,%1,0,48
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (zero_extend:DI (match_dup 1)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (zero_extend:DI (match_dup 1)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
(define_expand "extendhidi2"
[(set (match_operand:DI 0 "gpc_reg_operand" "")
(sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
@@ -365,73 +362,6 @@
(const_int 0)))]
"")
-(define_expand "zero_extendsidi2"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
- "TARGET_POWERPC64"
- "")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (zero_extend:DI (match_operand:SI 1 "reg_or_mem_operand" "m,r")))]
- "TARGET_POWERPC64"
- "@
- lwz%U1%X1 %0,%1
- rldicl %0,%1,0,32"
- [(set_attr "type" "load,*")])
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (clobber (match_scratch:DI 2 "=r,r"))]
- "TARGET_64BIT"
- "@
- rldicl. %2,%1,0,32
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 2 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 2)
- (zero_extend:DI (match_dup 1)))
- (set (match_dup 0)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (zero_extend:DI (match_dup 1)))]
- "TARGET_64BIT"
- "@
- rldicl. %0,%1,0,32
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (zero_extend:DI (match_dup 1)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (zero_extend:DI (match_dup 1)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
(define_expand "extendsidi2"
[(set (match_operand:DI 0 "gpc_reg_operand" "")
(sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
@@ -953,6 +883,537 @@
[(set_attr "type" "compare")
(set_attr "length" "4,8")])
+;; IBM 405 and 440 half-word multiplication operations.
+
+(define_insn "*macchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
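;; A C model of the macchw arithmetic, read directly off the RTL above
;; (a sketch only, not the 405/440 pipeline behavior; assumes arithmetic
;; right shift on signed int):
;;   static int
;;   macchw_model (int ra, int rb, int acc)
;;   {
;;     return (rb >> 16) * (int) (short) ra + acc;  /* hi16(rb) * lo16(ra) */
;;   }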
+
+(define_insn "*macchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))))]
+ "TARGET_MULHW"
+ "nmacchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmacchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))))]
+ "TARGET_MULHW"
+ "nmaclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmaclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+;; IBM 405 and 440 string-search dlmzb instruction support.
+(define_insn "dlmzb"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")]
+ UNSPEC_DLMZB_CR))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_DLMZB))]
+ "TARGET_DLMZB"
+ "dlmzb. %0, %1, %2")
+
+(define_expand "strlensi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:BLK 1 "general_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_DLMZB_STRLEN))
+ (clobber (match_scratch:CC 4 "=x"))]
+ "TARGET_DLMZB && WORDS_BIG_ENDIAN && !optimize_size"
+{
+ rtx result = operands[0];
+ rtx src = operands[1];
+ rtx search_char = operands[2];
+ rtx align = operands[3];
+ rtx addr, scratch_string, word1, word2, scratch_dlmzb;
+ rtx loop_label, end_label, mem, cr0, cond;
+ if (search_char != const0_rtx
+ || GET_CODE (align) != CONST_INT
+ || INTVAL (align) < 8)
+ FAIL;
+ word1 = gen_reg_rtx (SImode);
+ word2 = gen_reg_rtx (SImode);
+ scratch_dlmzb = gen_reg_rtx (SImode);
+ scratch_string = gen_reg_rtx (Pmode);
+ loop_label = gen_label_rtx ();
+ end_label = gen_label_rtx ();
+ addr = force_reg (Pmode, XEXP (src, 0));
+ emit_move_insn (scratch_string, addr);
+ emit_label (loop_label);
+ mem = change_address (src, SImode, scratch_string);
+ emit_move_insn (word1, mem);
+ emit_move_insn (word2, adjust_address (mem, SImode, 4));
+ cr0 = gen_rtx_REG (CCmode, CR0_REGNO);
+ emit_insn (gen_dlmzb (scratch_dlmzb, word1, word2, cr0));
+ cond = gen_rtx_NE (VOIDmode, cr0, const0_rtx);
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ cond,
+ gen_rtx_LABEL_REF
+ (VOIDmode,
+ end_label),
+ pc_rtx)));
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, GEN_INT (8)));
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_LABEL_REF (VOIDmode, loop_label)));
+ emit_barrier ();
+ emit_label (end_label);
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, scratch_dlmzb));
+ emit_insn (gen_subsi3 (result, scratch_string, addr));
+ emit_insn (gen_subsi3 (result, result, const1_rtx));
+ DONE;
+})
+
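
For orientation: the strlensi expander above wraps dlmzb in a word-pair loop. dlmzb scans the eight bytes of word1:word2 for a zero byte, leaves its 1-based position in the destination register, and records a hit in CR0, which is what the conditional branch tests. A plain-C model of the emitted loop, assuming the 8-byte-aligned big-endian layout the expander requires (names are illustrative):

    #include <stddef.h>

    static size_t strlen_dlmzb_model(const unsigned char *s)
    {
        const unsigned char *p = s;               /* scratch_string */
        for (;;) {
            int pos = 0, found = 0;
            for (int i = 0; i < 8; i++)           /* dlmzb over word1:word2 */
                if (p[i] == 0) { pos = i + 1; found = 1; break; }
            if (found)                            /* CR0 != 0: jump to end */
                return (size_t)(p - s) + pos - 1; /* add count, subtract 1 */
            p += 8;                               /* addsi3 ..., 8; loop */
        }
    }
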
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
(compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
@@ -969,38 +1430,45 @@
;; Fixed-point arithmetic insns.
-;; Discourage ai/addic because of carry but provide it in an alternative
-;; allowing register zero as source.
-(define_expand "addsi3"
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_arith_cint_operand" "")))]
+(define_expand "add<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (plus:SDI (match_operand:SDI 1 "gpc_reg_operand" "")
+ (match_operand:SDI 2 "reg_or_add_cint_operand" "")))]
""
- "
{
- if (GET_CODE (operands[2]) == CONST_INT
- && ! add_operand (operands[2], SImode))
+ if (<MODE>mode == DImode && ! TARGET_POWERPC64)
+ {
+ if (non_short_cint_operand (operands[2], DImode))
+ FAIL;
+ }
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && ! add_operand (operands[2], <MODE>mode))
{
rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
- ? operands[0] : gen_reg_rtx (SImode));
+ ? operands[0] : gen_reg_rtx (<MODE>mode));
HOST_WIDE_INT val = INTVAL (operands[2]);
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT rest = trunc_int_for_mode (val - low, SImode);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
+
+ if (<MODE>mode == DImode && !satisfies_constraint_L (GEN_INT (rest)))
+ FAIL;
/* The ordering here is important for the prolog expander.
When space is allocated from the stack, adding 'low' first may
produce a temporary deallocation (which would be bad). */
- emit_insn (gen_addsi3 (tmp, operands[1], GEN_INT (rest)));
- emit_insn (gen_addsi3 (operands[0], tmp, GEN_INT (low)));
+ emit_insn (gen_add<mode>3 (tmp, operands[1], GEN_INT (rest)));
+ emit_insn (gen_add<mode>3 (operands[0], tmp, GEN_INT (low)));
DONE;
}
-}")
+})
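
The constant-splitting arithmetic in this expander (and in the matching define_split further down) is the usual addis/addi decomposition: peel off a sign-extended low 16 bits so that the remainder is a multiple of 0x10000. A small C sketch with illustrative names:

    #include <stdint.h>

    /* 'low' is the sign-extended low 16 bits of val; 'rest' = val - low
       then has its low 16 bits clear, so rest goes in via addis and low
       via addi, in that order (see the stack-allocation comment above).  */
    static void split_add_const(int64_t val, int64_t *rest, int64_t *low)
    {
        *low  = ((val & 0xffff) ^ 0x8000) - 0x8000;
        *rest = val - *low;
    }
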
-(define_insn "*addsi3_internal1"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,?r,r")
- (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,b,r,b")
- (match_operand:SI 2 "add_operand" "r,I,I,L")))]
+;; Discourage ai/addic because of carry but provide it in an alternative
+;; allowing register zero as source.
+(define_insn "*add<mode>3_internal1"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r,?r,r")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b,r,b")
+ (match_operand:GPR 2 "add_operand" "r,I,I,L")))]
""
"@
{cax|add} %0,%1,%2
@@ -1017,13 +1485,13 @@
"{cau|addis} %0,%1,ha16(%2)"
[(set_attr "length" "4")])
-(define_insn "*addsi3_internal2"
+(define_insn "*add<mode>3_internal2"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
- (match_operand:SI 2 "reg_or_short_operand" "r,I,r,I"))
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
(const_int 0)))
- (clobber (match_scratch:SI 3 "=r,r,r,r"))]
- "TARGET_32BIT"
+ (clobber (match_scratch:P 3 "=r,r,r,r"))]
+ ""
"@
{cax.|add.} %3,%1,%2
{ai.|addic.} %3,%1,%2
@@ -1034,28 +1502,28 @@
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
+ (compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_short_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 3 ""))]
- "TARGET_32BIT && reload_completed"
+ (clobber (match_scratch:GPR 3 ""))]
+ "reload_completed"
[(set (match_dup 3)
- (plus:SI (match_dup 1)
+ (plus:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(compare:CC (match_dup 3)
(const_int 0)))]
"")
-(define_insn "*addsi3_internal3"
+(define_insn "*add<mode>3_internal3"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
- (match_operand:SI 2 "reg_or_short_operand" "r,I,r,I"))
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
- (plus:SI (match_dup 1)
- (match_dup 2)))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:P (match_dup 1)
+ (match_dup 2)))]
+ ""
"@
{cax.|add.} %0,%1,%2
{ai.|addic.} %0,%1,%2
@@ -1066,15 +1534,15 @@
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
[(set (match_dup 0)
- (plus:SI (match_dup 1)
- (match_dup 2)))
+ (plus:P (match_dup 1)
+ (match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
@@ -1085,34 +1553,43 @@
;; add should be last in case the result gets used in an address.
(define_split
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "non_add_cint_operand" "")))]
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "non_add_cint_operand" "")))]
""
- [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
- (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
-"
+ [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
{
HOST_WIDE_INT val = INTVAL (operands[2]);
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT rest = trunc_int_for_mode (val - low, SImode);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
- operands[3] = GEN_INT (rest);
operands[4] = GEN_INT (low);
-}")
+ if (<MODE>mode == SImode || satisfies_constraint_L (GEN_INT (rest)))
+ operands[3] = GEN_INT (rest);
+ else if (! no_new_pseudos)
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_move_insn (operands[3], operands[2]);
+ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+})
-(define_insn "one_cmplsi2"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (not:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"nor %0,%1,%1")
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (clobber (match_scratch:SI 2 "=r,r"))]
- "TARGET_32BIT"
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
"@
nor. %2,%1,%1
#"
@@ -1121,12 +1598,12 @@
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 2 ""))]
- "TARGET_32BIT && reload_completed"
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
[(set (match_dup 2)
- (not:SI (match_dup 1)))
+ (not:P (match_dup 1)))
(set (match_dup 0)
(compare:CC (match_dup 2)
(const_int 0)))]
@@ -1134,11 +1611,11 @@
(define_insn ""
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (not:SI (match_dup 1)))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (not:P (match_dup 1)))]
+ ""
"@
nor. %0,%1,%1
#"
@@ -1147,13 +1624,13 @@
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (not:SI (match_dup 1)))]
- "TARGET_32BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (not:P (match_dup 1)))]
+ "reload_completed"
[(set (match_dup 0)
- (not:SI (match_dup 1)))
+ (not:P (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
@@ -1167,9 +1644,9 @@
"{sf%I1|subf%I1c} %0,%2,%1")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (minus:SI (match_operand:SI 1 "reg_or_short_operand" "r,I")
- (match_operand:SI 2 "gpc_reg_operand" "r,r")))]
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
+ (minus:GPR (match_operand:GPR 1 "reg_or_short_operand" "r,I")
+ (match_operand:GPR 2 "gpc_reg_operand" "r,r")))]
"TARGET_POWERPC"
"@
subf %0,%2,%1
@@ -1190,11 +1667,11 @@
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (clobber (match_scratch:SI 3 "=r,r"))]
- "TARGET_POWERPC && TARGET_32BIT"
+ (clobber (match_scratch:P 3 "=r,r"))]
+ "TARGET_POWERPC"
"@
subf. %3,%2,%1
#"
@@ -1203,13 +1680,13 @@
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "gpc_reg_operand" ""))
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 3 ""))]
- "TARGET_32BIT && reload_completed"
+ (clobber (match_scratch:P 3 ""))]
+ "reload_completed"
[(set (match_dup 3)
- (minus:SI (match_dup 1)
+ (minus:P (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(compare:CC (match_dup 3)
@@ -1232,13 +1709,13 @@
(define_insn ""
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (minus:SI (match_dup 1)
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (minus:P (match_dup 1)
(match_dup 2)))]
- "TARGET_POWERPC && TARGET_32BIT"
+ "TARGET_POWERPC"
"@
subf. %0,%2,%1
#"
@@ -1247,32 +1724,32 @@
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "gpc_reg_operand" ""))
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (minus:SI (match_dup 1)
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (minus:P (match_dup 1)
(match_dup 2)))]
- "TARGET_32BIT && reload_completed"
+ "reload_completed"
[(set (match_dup 0)
- (minus:SI (match_dup 1)
+ (minus:P (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
-(define_expand "subsi3"
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (minus:SI (match_operand:SI 1 "reg_or_short_operand" "")
- (match_operand:SI 2 "reg_or_arith_cint_operand" "")))]
+(define_expand "sub<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (minus:SDI (match_operand:SDI 1 "reg_or_short_operand" "")
+ (match_operand:SDI 2 "reg_or_sub_cint_operand" "")))]
""
"
{
if (GET_CODE (operands[2]) == CONST_INT)
{
- emit_insn (gen_addsi3 (operands[0], operands[1],
- negate_rtx (SImode, operands[2])));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1],
+ negate_rtx (<MODE>mode, operands[2])));
DONE;
}
}")
@@ -1555,18 +2032,24 @@
(set (match_dup 0) (minus:SI (match_dup 2) (match_dup 0)))]
"")
-(define_insn "negsi2"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+(define_expand "neg<mode>2"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (neg:SDI (match_operand:SDI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn "*neg<mode>2_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"neg %0,%1")
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (clobber (match_scratch:SI 2 "=r,r"))]
- "TARGET_32BIT"
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
"@
neg. %2,%1
#"
@@ -1575,12 +2058,12 @@
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 2 ""))]
- "TARGET_32BIT && reload_completed"
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
[(set (match_dup 2)
- (neg:SI (match_dup 1)))
+ (neg:P (match_dup 1)))
(set (match_dup 0)
(compare:CC (match_dup 2)
(const_int 0)))]
@@ -1588,11 +2071,11 @@
(define_insn ""
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (neg:SI (match_dup 1)))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (match_dup 1)))]
+ ""
"@
neg. %0,%1
#"
@@ -1601,56 +2084,85 @@
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (neg:SI (match_dup 1)))]
- "TARGET_32BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (neg:P (match_dup 1)))]
+ "reload_completed"
[(set (match_dup 0)
- (neg:SI (match_dup 1)))
+ (neg:P (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
-(define_insn "clzsi2"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (clz:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (clz:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
- "{cntlz|cntlzw} %0,%1")
+ "{cntlz|cntlz<wd>} %0,%1")
-(define_expand "ctzsi2"
+(define_expand "ctz<mode>2"
[(set (match_dup 2)
- (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:SI (match_dup 1)
- (match_dup 2)))
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
(clobber (scratch:CC))])
- (set (match_dup 4) (clz:SI (match_dup 3)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (minus:SI (const_int 31) (match_dup 4)))]
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
""
{
- operands[2] = gen_reg_rtx (SImode);
- operands[3] = gen_reg_rtx (SImode);
- operands[4] = gen_reg_rtx (SImode);
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 1);
})
-
-(define_expand "ffssi2"
+
+(define_expand "ffs<mode>2"
[(set (match_dup 2)
- (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:SI (match_dup 1)
- (match_dup 2)))
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
(clobber (scratch:CC))])
- (set (match_dup 4) (clz:SI (match_dup 3)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (minus:SI (const_int 32) (match_dup 4)))]
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
""
{
- operands[2] = gen_reg_rtx (SImode);
- operands[3] = gen_reg_rtx (SImode);
- operands[4] = gen_reg_rtx (SImode);
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode));
})
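
Both expansions reduce to one cntlz<wd> idiom: x & -x isolates the lowest set bit, the leading-zero count locates it, and a final subtraction converts that to a trailing-bit index (width-1 minus clz for ctz) or a 1-based position (width minus clz for ffs). A C model of the SImode case; __builtin_clz stands in for cntlzw and is undefined at zero, so the ffs model guards explicitly what the hardware gets for free (cntlzw of 0 yields 32, making ffs(0) come out as 0):

    #include <stdint.h>

    int ctz32_model(uint32_t x)       /* x != 0 assumed, as for the optab */
    {
        return 31 - __builtin_clz(x & -x);
    }

    int ffs32_model(uint32_t x)
    {
        return x ? 32 - __builtin_clz(x & -x) : 0;
    }
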
-
+
+(define_expand "popcount<mode>2"
+ [(set (match_dup 2)
+ (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
+ UNSPEC_POPCNTB))
+ (set (match_dup 3)
+ (mult:GPR (match_dup 2) (match_dup 4)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (lshiftrt:GPR (match_dup 3) (match_dup 5)))]
+ "TARGET_POPCNTB"
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = force_reg (<MODE>mode,
+ <MODE>mode == SImode
+ ? GEN_INT (0x01010101)
+ : GEN_INT ((HOST_WIDE_INT)
+ 0x01010101 << 32 | 0x01010101));
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 8);
+ })
+
+(define_insn "popcntb<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
+ UNSPEC_POPCNTB))]
+ "TARGET_POPCNTB"
+ "popcntb %0,%1")
+
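
The popcount expansion leans on popcntb, which replaces each byte of the source with that byte's bit count; multiplying by 0x01010101 then sums all the byte counts into the top byte, and the shift by width-8 extracts it. A C model of the SImode path with popcntb spelled out (DImode uses the 64-bit analogue of the constant, as the expander shows):

    #include <stdint.h>

    unsigned popcount32_model(uint32_t x)
    {
        uint32_t per_byte = 0;
        for (int i = 0; i < 32; i += 8) {       /* model of popcntb */
            uint32_t b = (x >> i) & 0xff, c = 0;
            while (b) { c += b & 1; b >>= 1; }
            per_byte |= c << i;
        }
        return (per_byte * 0x01010101u) >> 24;  /* mult, then lshiftrt */
    }
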
(define_expand "mulsi3"
[(use (match_operand:SI 0 "gpc_reg_operand" ""))
(use (match_operand:SI 1 "gpc_reg_operand" ""))
@@ -1674,10 +2186,10 @@
"@
{muls|mullw} %0,%1,%2
{muli|mulli} %0,%1,%2"
- [(set (attr "type")
+ [(set (attr "type")
(cond [(match_operand:SI 2 "s8bit_cint_operand" "")
(const_string "imul3")
- (match_operand:SI 2 "short_cint_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")
(const_string "imul2")]
(const_string "imul")))])
@@ -1689,10 +2201,10 @@
"@
{muls|mullw} %0,%1,%2
{muli|mulli} %0,%1,%2"
- [(set (attr "type")
+ [(set (attr "type")
(cond [(match_operand:SI 2 "s8bit_cint_operand" "")
(const_string "imul3")
- (match_operand:SI 2 "short_cint_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")
(const_string "imul2")]
(const_string "imul")))])
@@ -1848,10 +2360,10 @@
"divs %0,%1,%2"
[(set_attr "type" "idiv")])
-(define_expand "udivsi3"
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "gpc_reg_operand" "")))]
+(define_expand "udiv<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "gpc_reg_operand" "")))]
"TARGET_POWERPC || (! TARGET_POWER && ! TARGET_POWERPC)"
"
{
@@ -1880,21 +2392,21 @@
[(set_attr "type" "idiv")])
(define_insn "*udivsi3_no_mq"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
"TARGET_POWERPC && ! TARGET_POWER"
- "divwu %0,%1,%2"
+ "div<wd>u %0,%1,%2"
[(set_attr "type" "idiv")])
;; For powers of two we can do srai/aze for divide and then adjust for
;; modulus. If it isn't a power of two, FAIL on POWER so divmodsi4 will be
;; used; for PowerPC, force operands into register and do a normal divide;
;; for AIX common-mode, use quoss call on register operands.
-(define_expand "divsi3"
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_cint_operand" "")))]
+(define_expand "div<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_cint_operand" "")))]
""
"
{
@@ -1904,7 +2416,7 @@
;
else if (TARGET_POWERPC)
{
- operands[2] = force_reg (SImode, operands[2]);
+ operands[2] = force_reg (<MODE>mode, operands[2]);
if (TARGET_POWER)
{
emit_insn (gen_divsi3_mq (operands[0], operands[1], operands[2]));
@@ -1932,18 +2444,18 @@
"divw %0,%1,%2"
[(set_attr "type" "idiv")])
-(define_insn "*divsi3_no_mq"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "gpc_reg_operand" "r")))]
+(define_insn "*div<mode>3_no_mq"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
"TARGET_POWERPC && ! TARGET_POWER"
- "divw %0,%1,%2"
+ "div<wd> %0,%1,%2"
[(set_attr "type" "idiv")])
-(define_expand "modsi3"
- [(use (match_operand:SI 0 "gpc_reg_operand" ""))
- (use (match_operand:SI 1 "gpc_reg_operand" ""))
- (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+(define_expand "mod<mode>3"
+ [(use (match_operand:GPR 0 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 1 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 2 "reg_or_cint_operand" ""))]
""
"
{
@@ -1956,45 +2468,47 @@
|| (i = exact_log2 (INTVAL (operands[2]))) < 0)
FAIL;
- temp1 = gen_reg_rtx (SImode);
- temp2 = gen_reg_rtx (SImode);
+ temp1 = gen_reg_rtx (<MODE>mode);
+ temp2 = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_divsi3 (temp1, operands[1], operands[2]));
- emit_insn (gen_ashlsi3 (temp2, temp1, GEN_INT (i)));
- emit_insn (gen_subsi3 (operands[0], operands[1], temp2));
+ emit_insn (gen_div<mode>3 (temp1, operands[1], operands[2]));
+ emit_insn (gen_ashl<mode>3 (temp2, temp1, GEN_INT (i)));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
DONE;
}")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "exact_log2_cint_operand" "N")))]
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "exact_log2_cint_operand" "N")))]
""
- "{srai|srawi} %0,%1,%p2\;{aze|addze} %0,%0"
- [(set_attr "length" "8")])
+ "{srai|sra<wd>i} %0,%1,%p2\;{aze|addze} %0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
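
The srai/aze pairing gives signed division by a power of two with the round-toward-zero semantics C requires: the arithmetic shift alone rounds toward minus infinity, and srawi's carry out (set only for a negative value with nonzero shifted-out bits) is folded back in by addze. In C, assuming arithmetic right shift on signed ints:

    #include <stdint.h>

    int32_t div_pow2_model(int32_t x, int i)    /* computes x / (1 << i) */
    {
        int32_t q = x >> i;                               /* srawi */
        if (x < 0 && (x & (int32_t)((1u << i) - 1)) != 0) /* carry */
            q += 1;                                       /* addze */
        return q;
    }
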
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "exact_log2_cint_operand" "N,N"))
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
(const_int 0)))
- (clobber (match_scratch:SI 3 "=r,r"))]
+ (clobber (match_scratch:P 3 "=r,r"))]
""
"@
- {srai|srawi} %3,%1,%p2\;{aze.|addze.} %3,%3
+ {srai|sra<wd>i} %3,%1,%p2\;{aze.|addze.} %3,%3
#"
[(set_attr "type" "compare")
(set_attr "length" "8,12")])
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "exact_log2_cint_operand" ""))
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
(const_int 0)))
- (clobber (match_scratch:SI 3 ""))]
+ (clobber (match_scratch:GPR 3 ""))]
"reload_completed"
[(set (match_dup 3)
- (div:SI (match_dup 1) (match_dup 2)))
+ (div:<MODE> (match_dup 1) (match_dup 2)))
(set (match_dup 0)
(compare:CC (match_dup 3)
(const_int 0)))]
@@ -2002,28 +2516,29 @@
(define_insn ""
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "exact_log2_cint_operand" "N,N"))
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (div:SI (match_dup 1) (match_dup 2)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (div:P (match_dup 1) (match_dup 2)))]
""
"@
- {srai|srawi} %0,%1,%p2\;{aze.|addze.} %0,%0
+ {srai|sra<wd>i} %0,%1,%p2\;{aze.|addze.} %0,%0
#"
[(set_attr "type" "compare")
(set_attr "length" "8,12")])
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "exact_log2_cint_operand" ""))
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (div:SI (match_dup 1) (match_dup 2)))]
+ (set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_dup 1) (match_dup 2)))]
"reload_completed"
[(set (match_dup 0)
- (div:SI (match_dup 1) (match_dup 2)))
+ (div:<MODE> (match_dup 1) (match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
@@ -2215,7 +2730,8 @@
and %0,%1,%2
{rlinm|rlwinm} %0,%1,0,%m2,%M2
{andil.|andi.} %0,%1,%b2
- {andiu.|andis.} %0,%1,%u2")
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
;; Note to set cr's other than cr0 we do the and immediate and then
;; the test again -- this avoids a mfcr which on the higher end
@@ -2263,15 +2779,15 @@
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "and_operand" ""))
+ (compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "and_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:GPR 3 ""))
(clobber (match_scratch:CC 4 ""))]
"reload_completed"
[(parallel [(set (match_dup 3)
- (and:SI (match_dup 1)
- (match_dup 2)))
+ (and:<MODE> (match_dup 1)
+ (match_dup 2)))
(clobber (match_dup 4))])
(set (match_dup 0)
(compare:CC (match_dup 3)
@@ -2515,7 +3031,7 @@
(const_int 0)))]
"")
-;; Split a logical operation that we can't do in one insn into two insns,
+;; Split a logical operation that we can't do in one insn into two insns,
;; each of which does one 16-bit part. This is used by combine.
(define_split
@@ -2530,11 +3046,11 @@
{
rtx i;
i = GEN_INT (INTVAL (operands[2]) & (~ (HOST_WIDE_INT) 0xffff));
- operands[4] = gen_rtx (GET_CODE (operands[3]), SImode,
- operands[1], i);
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[1], i);
i = GEN_INT (INTVAL (operands[2]) & 0xffff);
- operands[5] = gen_rtx (GET_CODE (operands[3]), SImode,
- operands[0], i);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[0], i);
}")
(define_insn "*boolcsi3_internal1"
@@ -2873,9 +3389,12 @@
{
/* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
- compiler if the address of the structure is taken later. */
+ compiler if the address of the structure is taken later. Likewise, do
+ not handle invalid E500 subregs. */
if (GET_CODE (operands[0]) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD))
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD
+ || ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (operands[0], GET_MODE (operands[0])))))
FAIL;
if (TARGET_POWERPC64 && GET_MODE (operands[0]) == DImode)
@@ -2906,7 +3425,7 @@
[(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
(match_operand:SI 1 "const_int_operand" "i")
(match_operand:SI 2 "const_int_operand" "i"))
- (ashift:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (rotate:SI (match_operand:SI 3 "gpc_reg_operand" "r")
(match_operand:SI 4 "const_int_operand" "i")))]
"(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
"*
@@ -2981,6 +3500,45 @@
}"
[(set_attr "type" "insert_word")])
+;; combine patterns for rlwimi
+(define_insn "*insvsi_internal5"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))
+ (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal6"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))
+ (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
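
The two combine patterns above match a masked merge, i.e. a bit-field insert, which rlwimi performs in one instruction when the two masks are exact complements (the INTVAL check). The computation being matched, in C; mask plays the role of operands[5], and the emitted rlwimi rotates by 32 minus the shift, which for the bits the mask keeps is the same as the right shift:

    #include <stdint.h>

    uint32_t rlwimi_model(uint32_t dst, uint32_t src, int sh, uint32_t mask)
    {
        return (dst & ~mask) | ((src >> sh) & mask);
    }
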
(define_insn "insvdi"
[(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
(match_operand:SI 1 "const_int_operand" "i")
@@ -2996,6 +3554,46 @@
return \"rldimi %0,%3,%H1,%H2\";
}")
+(define_insn "*insvdi_internal2"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
+(define_insn "*insvdi_internal3"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
(define_expand "extzv"
[(set (match_operand 0 "gpc_reg_operand" "")
(zero_extract (match_operand 1 "gpc_reg_operand" "")
@@ -3183,7 +3781,8 @@
operands[3] = GEN_INT (start + size);
operands[2] = GEN_INT (64 - size);
return \"rldicl. %4,%1,%3,%2\";
-}")
+}"
+ [(set_attr "type" "compare")])
(define_insn "*extzvdi_internal2"
[(set (match_operand:CC 4 "gpc_reg_operand" "=x")
@@ -3205,7 +3804,8 @@
operands[3] = GEN_INT (start + size);
operands[2] = GEN_INT (64 - size);
return \"rldicl. %0,%1,%3,%2\";
-}")
+}"
+ [(set_attr "type" "compare")])
(define_insn "rotlsi3"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
@@ -3650,7 +4250,7 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "rlwinm"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "const_int_operand" "i"))
@@ -4317,20 +4917,35 @@
;; this case, we just lose precision that we would have otherwise gotten but
;; is not guaranteed. Perhaps this should be tightened up at some point.
-(define_insn "extendsfdf2"
- [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
- (float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_extend:DF (match_operand:SF 1 "reg_or_none500mem_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn_and_split "*extendsfdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f,f")
+ (float_extend:DF (match_operand:SF 1 "reg_or_mem_operand" "0,f,m")))]
"TARGET_HARD_FLOAT && TARGET_FPRS"
- "*
+ "@
+ #
+ fmr %0,%1
+ lfs%U1%X1 %0,%1"
+ "&& reload_completed && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
{
- if (REGNO (operands[0]) == REGNO (operands[1]))
- return \"\";
- else
- return \"fmr %0,%1\";
-}"
- [(set_attr "type" "fp")])
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp,fp,fpload")])
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
-(define_insn "truncdfsf2"
+(define_insn "*truncdfsf2_fpr"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f")
(float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "f")))]
"TARGET_HARD_FLOAT && TARGET_FPRS"
@@ -4446,12 +5061,26 @@
"{fm|fmul} %0,%1,%2"
[(set_attr "type" "dmul")])
+(define_insn "fres"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_PPC_GFXOPT && flag_finite_math_only"
+ "fres %0,%1"
+ [(set_attr "type" "fp")])
+
(define_expand "divsf3"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
(div:SF (match_operand:SF 1 "gpc_reg_operand" "")
(match_operand:SF 2 "gpc_reg_operand" "")))]
"TARGET_HARD_FLOAT"
- "")
+{
+ if (swdiv && !optimize_size && TARGET_PPC_GFXOPT
+ && flag_finite_math_only && !flag_trapping_math)
+ {
+ rs6000_emit_swdivsf (operands[0], operands[1], operands[2]);
+ DONE;
+ }
+})
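
When software division is enabled (the swdiv flag, with finite non-trapping math), the expander hands off to rs6000_emit_swdivsf, which starts from the fres reciprocal estimate and refines it by Newton-Raphson before multiplying. A schematic C sketch of the idea only; the real sequence uses fused multiply-adds, more refinement, and the hardware estimate rather than a true divide:

    float swdiv_model(float n, float d)
    {
        float e = 1.0f / d;           /* stand-in for the fres estimate  */
        e = e + e * (1.0f - d * e);   /* Newton step: e' = e * (2 - d*e) */
        return n * e;                 /* quotient as n * (1/d)           */
    }
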
(define_insn ""
[(set (match_operand:SF 0 "gpc_reg_operand" "=f")
@@ -4581,7 +5210,7 @@
"! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
&& ! HONOR_SIGNED_ZEROS (SFmode)"
"{fnms|fnmsub} %0,%1,%2,%3"
- [(set_attr "type" "fp")])
+ [(set_attr "type" "dmul")])
(define_expand "sqrtsf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
@@ -4603,26 +5232,62 @@
"fsqrt %0,%1"
[(set_attr "type" "dsqrt")])
+(define_expand "copysignsf3"
+ [(set (match_dup 3)
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:SF (abs:SF (match_dup 1))))
+ (set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && !HONOR_NANS (SFmode) && !HONOR_SIGNED_ZEROS (SFmode)"
+ {
+ operands[3] = gen_reg_rtx (SFmode);
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = CONST0_RTX (SFmode);
+ })
+
+(define_expand "copysigndf3"
+ [(set (match_dup 3)
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:DF (abs:DF (match_dup 1))))
+ (set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && !HONOR_NANS (DFmode) && !HONOR_SIGNED_ZEROS (DFmode)"
+ {
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = CONST0_RTX (DFmode);
+ })
+
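
The copysign expanders compute the result through fsel's "selector >= 0" semantics, which is why they are fenced off when NaNs or signed zeros must be honored. The net effect in C:

    #include <math.h>

    double copysign_fsel_model(double x, double y)
    {
        return y >= 0.0 ? fabs(x) : -fabs(x);   /* fsel on y, |x|, -|x| */
    }
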
;; For MIN, MAX, and conditional move, we use DEFINE_EXPAND's that involve a
;; fsel instruction and some auxiliary computations. Then we just have a
;; single DEFINE_INSN for fsel and the define_splits to make them if made by
;; combine.
-(define_expand "maxsf3"
+(define_expand "smaxsf3"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
(if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
(match_operand:SF 2 "gpc_reg_operand" ""))
(match_dup 1)
(match_dup 2)))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
"{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
-(define_expand "minsf3"
+(define_expand "sminsf3"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
(if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
(match_operand:SF 2 "gpc_reg_operand" ""))
(match_dup 2)
(match_dup 1)))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
"{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
(define_split
@@ -4630,10 +5295,10 @@
(match_operator:SF 3 "min_max_operator"
[(match_operand:SF 1 "gpc_reg_operand" "")
(match_operand:SF 2 "gpc_reg_operand" "")]))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
[(const_int 0)]
"
-{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
operands[1], operands[2]);
DONE;
}")
@@ -4720,28 +5385,47 @@
"fsel %0,%1,%2,%3"
[(set_attr "type" "fp")])
-(define_insn "negdf2"
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*negdf2_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(neg:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
"TARGET_HARD_FLOAT && TARGET_FPRS"
"fneg %0,%1"
[(set_attr "type" "fp")])
-(define_insn "absdf2"
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*absdf2_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(abs:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
"TARGET_HARD_FLOAT && TARGET_FPRS"
"fabs %0,%1"
[(set_attr "type" "fp")])
-(define_insn ""
+(define_insn "*nabsdf2_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f"))))]
"TARGET_HARD_FLOAT && TARGET_FPRS"
"fnabs %0,%1"
[(set_attr "type" "fp")])
-(define_insn "adddf3"
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*adddf3_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(plus:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
(match_operand:DF 2 "gpc_reg_operand" "f")))]
@@ -4749,7 +5433,14 @@
"{fa|fadd} %0,%1,%2"
[(set_attr "type" "fp")])
-(define_insn "subdf3"
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*subdf3_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(minus:DF (match_operand:DF 1 "gpc_reg_operand" "f")
(match_operand:DF 2 "gpc_reg_operand" "f")))]
@@ -4757,7 +5448,14 @@
"{fs|fsub} %0,%1,%2"
[(set_attr "type" "fp")])
-(define_insn "muldf3"
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*muldf3_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
(match_operand:DF 2 "gpc_reg_operand" "f")))]
@@ -4765,7 +5463,28 @@
"{fm|fmul} %0,%1,%2"
[(set_attr "type" "dmul")])
-(define_insn "divdf3"
+(define_insn "fred"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_POPCNTB && flag_finite_math_only"
+ "fre %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+{
+ if (swdiv && !optimize_size && TARGET_POPCNTB
+ && flag_finite_math_only && !flag_trapping_math)
+ {
+ rs6000_emit_swdivdf (operands[0], operands[1], operands[2]);
+ DONE;
+ }
+})
+
+(define_insn "*divdf3_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(div:DF (match_operand:DF 1 "gpc_reg_operand" "f")
(match_operand:DF 2 "gpc_reg_operand" "f")))]
@@ -4826,7 +5545,7 @@
(minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
(mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
(match_operand:DF 2 "gpc_reg_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
&& ! HONOR_SIGNED_ZEROS (DFmode)"
"{fnms|fnmsub} %0,%1,%2,%3"
[(set_attr "type" "dmul")])
@@ -4839,24 +5558,24 @@
[(set_attr "type" "dsqrt")])
;; The conditional move instructions allow us to perform max and min
-;; operations even when
+;; operations even when
-(define_expand "maxdf3"
+(define_expand "smaxdf3"
[(set (match_operand:DF 0 "gpc_reg_operand" "")
(if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
(match_operand:DF 2 "gpc_reg_operand" ""))
(match_dup 1)
(match_dup 2)))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
"{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
-(define_expand "mindf3"
+(define_expand "smindf3"
[(set (match_operand:DF 0 "gpc_reg_operand" "")
(if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
(match_operand:DF 2 "gpc_reg_operand" ""))
(match_dup 2)
(match_dup 1)))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
"{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
(define_split
@@ -4864,10 +5583,10 @@
(match_operator:DF 3 "min_max_operator"
[(match_operand:DF 1 "gpc_reg_operand" "")
(match_operand:DF 2 "gpc_reg_operand" "")]))]
- "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
[(const_int 0)]
"
-{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
operands[1], operands[2]);
DONE;
}")
@@ -4936,6 +5655,11 @@
"TARGET_HARD_FLOAT && TARGET_FPRS"
"
{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatsidf2 (operands[0], operands[1]));
+ DONE;
+ }
if (TARGET_POWERPC64)
{
rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
@@ -4952,7 +5676,7 @@
operands[6] = gen_reg_rtx (SImode);
}")
-(define_insn "*floatsidf2_internal"
+(define_insn_and_split "*floatsidf2_internal"
[(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
(float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
(use (match_operand:SI 2 "gpc_reg_operand" "r"))
@@ -4962,45 +5686,29 @@
(clobber (match_operand:SI 6 "gpc_reg_operand" "=&r"))]
"! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
"#"
- [(set_attr "length" "24")])
-
-(define_split
- [(set (match_operand:DF 0 "gpc_reg_operand" "")
- (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
- (use (match_operand:SI 2 "gpc_reg_operand" ""))
- (use (match_operand:DF 3 "gpc_reg_operand" ""))
- (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
- (clobber (match_operand:DF 5 "gpc_reg_operand" ""))
- (clobber (match_operand:SI 6 "gpc_reg_operand" ""))]
- "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
- [(set (match_operand:DF 0 "gpc_reg_operand" "")
- (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
- (use (match_operand:SI 2 "gpc_reg_operand" ""))
- (use (match_operand:DF 3 "gpc_reg_operand" ""))
- (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
- (clobber (match_operand:DF 5 "gpc_reg_operand" ""))
- (clobber (match_operand:SI 6 "gpc_reg_operand" ""))]
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
"
{
rtx lowword, highword;
- if (GET_CODE (operands[4]) != MEM)
- abort();
- highword = XEXP (operands[4], 0);
- lowword = plus_constant (highword, 4);
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
if (! WORDS_BIG_ENDIAN)
{
rtx tmp;
tmp = highword; highword = lowword; lowword = tmp;
}
- emit_insn (gen_xorsi3 (operands[6], operands[1],
+ emit_insn (gen_xorsi3 (operands[6], operands[1],
GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
- emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[6]);
- emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]);
+ emit_move_insn (lowword, operands[6]);
+ emit_move_insn (highword, operands[2]);
emit_move_insn (operands[5], operands[4]);
emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
DONE;
-}")
+}"
+ [(set_attr "length" "24")])
(define_expand "floatunssisf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
@@ -5015,9 +5723,14 @@
(use (match_dup 3))
(clobber (match_dup 4))
(clobber (match_dup 5))])]
- "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
"
{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatunssidf2 (operands[0], operands[1]));
+ DONE;
+ }
if (TARGET_POWERPC64)
{
rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
@@ -5034,7 +5747,7 @@
operands[5] = gen_reg_rtx (DFmode);
}")
-(define_insn "*floatunssidf2_internal"
+(define_insn_and_split "*floatunssidf2_internal"
[(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
(unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
(use (match_operand:SI 2 "gpc_reg_operand" "r"))
@@ -5043,100 +5756,173 @@
(clobber (match_operand:DF 5 "gpc_reg_operand" "=&f"))]
"! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
"#"
- [(set_attr "length" "20")])
-
-(define_split
- [(set (match_operand:DF 0 "gpc_reg_operand" "")
- (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
- (use (match_operand:SI 2 "gpc_reg_operand" ""))
- (use (match_operand:DF 3 "gpc_reg_operand" ""))
- (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
- (clobber (match_operand:DF 5 "gpc_reg_operand" ""))]
- "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
- [(set (match_operand:DF 0 "gpc_reg_operand" "")
- (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
- (use (match_operand:SI 2 "gpc_reg_operand" ""))
- (use (match_operand:DF 3 "gpc_reg_operand" ""))
- (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
- (clobber (match_operand:DF 5 "gpc_reg_operand" ""))]
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
"
{
rtx lowword, highword;
- if (GET_CODE (operands[4]) != MEM)
- abort();
- highword = XEXP (operands[4], 0);
- lowword = plus_constant (highword, 4);
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
if (! WORDS_BIG_ENDIAN)
{
rtx tmp;
tmp = highword; highword = lowword; lowword = tmp;
}
- emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[1]);
- emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]);
+ emit_move_insn (lowword, operands[1]);
+ emit_move_insn (highword, operands[2]);
emit_move_insn (operands[5], operands[4]);
emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
DONE;
-}")
+}"
+ [(set_attr "length" "20")])
(define_expand "fix_truncdfsi2"
- [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ [(parallel [(set (match_operand:SI 0 "fix_trunc_dest_operand" "")
(fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
(clobber (match_dup 2))
(clobber (match_dup 3))])]
- "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "(TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
"
{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_fix_truncdfsi2 (operands[0], operands[1]));
+ DONE;
+ }
operands[2] = gen_reg_rtx (DImode);
+ if (TARGET_PPC_GFXOPT)
+ {
+ rtx orig_dest = operands[0];
+ if (! memory_operand (orig_dest, GET_MODE (orig_dest)))
+ operands[0] = assign_stack_temp (SImode, GET_MODE_SIZE (SImode), 0);
+ emit_insn (gen_fix_truncdfsi2_internal_gfxopt (operands[0], operands[1],
+ operands[2]));
+ if (operands[0] != orig_dest)
+ emit_move_insn (orig_dest, operands[0]);
+ DONE;
+ }
operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
}")
-(define_insn "*fix_truncdfsi2_internal"
+(define_insn_and_split "*fix_truncdfsi2_internal"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
(clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))
(clobber (match_operand:DI 3 "memory_operand" "=o"))]
"(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
"#"
- [(set_attr "length" "16")])
-
-(define_split
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
- (clobber (match_operand:DI 2 "gpc_reg_operand" ""))
- (clobber (match_operand:DI 3 "offsettable_mem_operand" ""))]
- "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
- [(set (match_operand:SI 0 "gpc_reg_operand" "")
- (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
- (clobber (match_operand:DI 2 "gpc_reg_operand" ""))
- (clobber (match_operand:DI 3 "offsettable_mem_operand" ""))]
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[3]))"
+ [(pc)]
"
{
rtx lowword;
- if (GET_CODE (operands[3]) != MEM)
- abort();
- lowword = XEXP (operands[3], 0);
- if (WORDS_BIG_ENDIAN)
- lowword = plus_constant (lowword, 4);
+ gcc_assert (MEM_P (operands[3]));
+ lowword = adjust_address (operands[3], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
emit_insn (gen_fctiwz (operands[2], operands[1]));
emit_move_insn (operands[3], operands[2]);
- emit_move_insn (operands[0], gen_rtx_MEM (SImode, lowword));
+ emit_move_insn (operands[0], lowword);
DONE;
-}")
+}"
+ [(set_attr "length" "16")])
+
+(define_insn_and_split "fix_truncdfsi2_internal_gfxopt"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_PPC_GFXOPT"
+ "#"
+ "&& 1"
+ [(pc)]
+ "
+{
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_insn (gen_stfiwx (operands[0], operands[2]));
+ DONE;
+}"
+ [(set_attr "length" "16")])
; Here, we use (set (reg) (unspec:DI [(fix:SI ...)] UNSPEC_FCTIWZ))
; rather than (set (subreg:SI (reg)) (fix:SI ...))
; because the first makes it clear that operand 0 is not live
; before the instruction.
(define_insn "fctiwz"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=*f")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
(unspec:DI [(fix:SI (match_operand:DF 1 "gpc_reg_operand" "f"))]
UNSPEC_FCTIWZ))]
"(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
"{fcirz|fctiwz} %0,%1"
[(set_attr "type" "fp")])
+(define_insn "btruncdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "btruncsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceildf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceilsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floordf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floorsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "rounddf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "roundsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+; An UNSPEC is used so we don't have to support SImode in FP registers.
+(define_insn "stfiwx"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (unspec:SI [(match_operand:DI 1 "gpc_reg_operand" "f")]
+ UNSPEC_STFIWX))]
+ "TARGET_PPC_GFXOPT"
+ "stfiwx %1,%y0"
+ [(set_attr "type" "fpstore")])
+
(define_expand "floatsisf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
(float:SF (match_operand:SI 1 "gpc_reg_operand" "")))]
@@ -5275,7 +6061,8 @@
? \"{a|addc} %0,%1,%2\;{ae|adde} %L0,%L1,%L2\"
: \"{ai|addic} %0,%1,%2\;{a%G2e|add%G2e} %L0,%L1\";
}"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_insn "*subdi3_noppc64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r,r")
@@ -5293,7 +6080,8 @@
? \"{sf|subfc} %0,%2,%1\;{sfe|subfe} %L0,%L2,%L1\"
: \"{sfi|subfic} %0,%2,%1\;{sf%G1e|subf%G1e} %L0,%L2\";
}"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_insn "*negdi2_noppc64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
@@ -5305,7 +6093,8 @@
? \"{sfi|subfic} %L0,%L1,0\;{sfze|subfze} %0,%1\"
: \"{sfi|subfic} %0,%1,0\;{sfze|subfze} %L0,%L1\";
}"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_expand "mulsidi3"
[(set (match_operand:DI 0 "gpc_reg_operand" "")
@@ -5588,11 +6377,12 @@
"@
{srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
{sri|srwi} %L0,%L1,%h2\;insrwi %L0,%1,%h2,0\;{srai|srawi} %0,%1,%h2"
- [(set_attr "length" "8,12")])
+ [(set_attr "type" "two,three")
+ (set_attr "length" "8,12")])
(define_insn "*ashrdisi3_noppc64"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
(const_int 32)) 4))]
"TARGET_32BIT && !TARGET_POWERPC64"
"*
@@ -5602,289 +6392,11 @@
else
return \"mr %0,%1\";
}"
- [(set_attr "length" "4")])
+ [(set_attr "length" "4")])
;; PowerPC64 DImode operations.
-(define_expand "adddi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_add_cint64_operand" "")))]
- ""
- "
-{
- if (! TARGET_POWERPC64)
- {
- if (non_short_cint_operand (operands[2], DImode))
- FAIL;
- }
- else
- if (GET_CODE (operands[2]) == CONST_INT
- && ! add_operand (operands[2], DImode))
- {
- rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
- ? operands[0] : gen_reg_rtx (DImode));
-
- HOST_WIDE_INT val = INTVAL (operands[2]);
- HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT rest = trunc_int_for_mode (val - low, DImode);
-
- if (!CONST_OK_FOR_LETTER_P (rest, 'L'))
- FAIL;
-
- /* The ordering here is important for the prolog expander.
- When space is allocated from the stack, adding 'low' first may
- produce a temporary deallocation (which would be bad). */
- emit_insn (gen_adddi3 (tmp, operands[1], GEN_INT (rest)));
- emit_insn (gen_adddi3 (operands[0], tmp, GEN_INT (low)));
- DONE;
- }
-}")
-
-;; Discourage ai/addic because of carry but provide it in an alternative
-;; allowing register zero as source.
-
-(define_insn "*adddi3_internal1"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,?r,r")
- (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,b,r,b")
- (match_operand:DI 2 "add_operand" "r,I,I,L")))]
- "TARGET_POWERPC64"
- "@
- add %0,%1,%2
- addi %0,%1,%2
- addic %0,%1,%2
- addis %0,%1,%v2")
-
-(define_insn "*adddi3_internal2"
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
- (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I"))
- (const_int 0)))
- (clobber (match_scratch:DI 3 "=r,r,r,r"))]
- "TARGET_64BIT"
- "@
- add. %3,%1,%2
- addic. %3,%1,%2
- #
- #"
- [(set_attr "type" "fast_compare,compare,compare,compare")
- (set_attr "length" "4,4,8,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 3 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 3)
- (plus:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 0)
- (compare:CC (match_dup 3)
- (const_int 0)))]
- "")
-
-(define_insn "*adddi3_internal3"
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
- (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
- (plus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- add. %0,%1,%2
- addic. %0,%1,%2
- #
- #"
- [(set_attr "type" "fast_compare,compare,compare,compare")
- (set_attr "length" "4,4,8,8")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (plus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (plus:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-;; Split an add that we can't do in one insn into two insns, each of which
-;; does one 16-bit part. This is used by combine. Note that the low-order
-;; add should be last in case the result gets used in an address.
-
-(define_split
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "non_add_cint_operand" "")))]
- "TARGET_POWERPC64"
- [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
- (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
-"
-{
- HOST_WIDE_INT val = INTVAL (operands[2]);
- HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT rest = trunc_int_for_mode (val - low, DImode);
-
- operands[4] = GEN_INT (low);
- if (CONST_OK_FOR_LETTER_P (rest, 'L'))
- operands[3] = GEN_INT (rest);
- else if (! no_new_pseudos)
- {
- operands[3] = gen_reg_rtx (DImode);
- emit_move_insn (operands[3], operands[2]);
- emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
- DONE;
- }
- else
- FAIL;
-}")
-
-(define_insn "one_cmpldi2"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (not:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
- "TARGET_POWERPC64"
- "nor %0,%1,%1")
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (clobber (match_scratch:DI 2 "=r,r"))]
- "TARGET_64BIT"
- "@
- nor. %2,%1,%1
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 2 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 2)
- (not:DI (match_dup 1)))
- (set (match_dup 0)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (not:DI (match_dup 1)))]
- "TARGET_64BIT"
- "@
- nor. %0,%1,%1
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (not:DI (match_dup 1)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (not:DI (match_dup 1)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (minus:DI (match_operand:DI 1 "reg_or_short_operand" "r,I")
- (match_operand:DI 2 "gpc_reg_operand" "r,r")))]
- "TARGET_POWERPC64"
- "@
- subf %0,%2,%1
- subfic %0,%2,%1")
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (clobber (match_scratch:DI 3 "=r,r"))]
- "TARGET_64BIT"
- "@
- subf. %3,%2,%1
- #"
- [(set_attr "type" "fast_compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 3 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 3)
- (minus:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 0)
- (compare:CC (match_dup 3)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (minus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- subf. %0,%2,%1
- #"
- [(set_attr "type" "fast_compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (minus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (minus:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_expand "subdi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (minus:DI (match_operand:DI 1 "reg_or_short_operand" "")
- (match_operand:DI 2 "reg_or_sub_cint64_operand" "")))]
- ""
- "
-{
- if (GET_CODE (operands[2]) == CONST_INT)
- {
- emit_insn (gen_adddi3 (operands[0], operands[1],
- negate_rtx (DImode, operands[2])));
- DONE;
- }
-}")
-
(define_insn_and_split "absdi2"
[(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
(abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))
@@ -5909,115 +6421,20 @@
(set (match_dup 0) (minus:DI (match_dup 2) (match_dup 0)))]
"")
-(define_expand "negdi2"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (neg:DI (match_operand:DI 1 "gpc_reg_operand" "")))]
- ""
- "")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
- "TARGET_POWERPC64"
- "neg %0,%1")
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (clobber (match_scratch:DI 2 "=r,r"))]
- "TARGET_64BIT"
- "@
- neg. %2,%1
- #"
- [(set_attr "type" "fast_compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 2 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 2)
- (neg:DI (match_dup 1)))
- (set (match_dup 0)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (neg:DI (match_dup 1)))]
- "TARGET_64BIT"
- "@
- neg. %0,%1
- #"
- [(set_attr "type" "fast_compare")
- (set_attr "length" "4,8")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (neg:DI (match_dup 1)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (neg:DI (match_dup 1)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn "clzdi2"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (clz:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
- "TARGET_POWERPC64"
- "cntlzd %0,%1")
-
-(define_expand "ctzdi2"
- [(set (match_dup 2)
- (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:DI (match_dup 1)
- (match_dup 2)))
- (clobber (scratch:CC))])
- (set (match_dup 4) (clz:DI (match_dup 3)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (minus:DI (const_int 63) (match_dup 4)))]
- "TARGET_POWERPC64"
- {
- operands[2] = gen_reg_rtx (DImode);
- operands[3] = gen_reg_rtx (DImode);
- operands[4] = gen_reg_rtx (DImode);
- })
-
-(define_expand "ffsdi2"
- [(set (match_dup 2)
- (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:DI (match_dup 1)
- (match_dup 2)))
- (clobber (scratch:CC))])
- (set (match_dup 4) (clz:DI (match_dup 3)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (minus:DI (const_int 64) (match_dup 4)))]
- "TARGET_POWERPC64"
- {
- operands[2] = gen_reg_rtx (DImode);
- operands[3] = gen_reg_rtx (DImode);
- operands[4] = gen_reg_rtx (DImode);
- })
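
The removed ctzdi2/ffsdi2 expanders derive both operations from cntlzd alone: x & -x isolates the lowest set bit, and the leading-zero count of that single-bit value pins down its position. A C restatement, with clz64 a portable stand-in for cntlzd (which returns 64 for a zero input, so ffs(0) comes out 0 for free):

#include <stdint.h>

static int clz64 (uint64_t x)
{
  int n = 0;
  if (x == 0)
    return 64;
  while (!(x & ((uint64_t) 1 << 63)))
    {
      x <<= 1;
      n++;
    }
  return n;
}

int ctz64 (uint64_t x) { return 63 - clz64 (x & (0 - x)); }
int ffs64 (uint64_t x) { return 64 - clz64 (x & (0 - x)); }
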
-
(define_insn "muldi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r")
- (match_operand:DI 2 "gpc_reg_operand" "r")))]
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I")))]
"TARGET_POWERPC64"
- "mulld %0,%1,%2"
- [(set_attr "type" "lmul")])
+ "@
+ mulld %0,%1,%2
+ mulli %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "lmul")))])
(define_insn "*muldi3_internal1"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -6099,126 +6516,6 @@
"mulhdu %0,%1,%2"
[(set_attr "type" "lmul")])
-(define_expand "divdi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "")
- (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_cint_operand" "")))]
- "TARGET_POWERPC64"
- "
-{
- if (GET_CODE (operands[2]) == CONST_INT
- && INTVAL (operands[2]) > 0
- && exact_log2 (INTVAL (operands[2])) >= 0)
- ;
- else
- operands[2] = force_reg (DImode, operands[2]);
-}")
-
-(define_expand "moddi3"
- [(use (match_operand:DI 0 "gpc_reg_operand" ""))
- (use (match_operand:DI 1 "gpc_reg_operand" ""))
- (use (match_operand:DI 2 "reg_or_cint_operand" ""))]
- "TARGET_POWERPC64"
- "
-{
- int i;
- rtx temp1;
- rtx temp2;
-
- if (GET_CODE (operands[2]) != CONST_INT
- || INTVAL (operands[2]) <= 0
- || (i = exact_log2 (INTVAL (operands[2]))) < 0)
- FAIL;
-
- temp1 = gen_reg_rtx (DImode);
- temp2 = gen_reg_rtx (DImode);
-
- emit_insn (gen_divdi3 (temp1, operands[1], operands[2]));
- emit_insn (gen_ashldi3 (temp2, temp1, GEN_INT (i)));
- emit_insn (gen_subdi3 (operands[0], operands[1], temp2));
- DONE;
-}")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (div:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "exact_log2_cint_operand" "N")))]
- "TARGET_POWERPC64"
- "sradi %0,%1,%p2\;addze %0,%0"
- [(set_attr "length" "8")])
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
- (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "exact_log2_cint_operand" "N,N"))
- (const_int 0)))
- (clobber (match_scratch:DI 3 "=r,r"))]
- "TARGET_64BIT"
- "@
- sradi %3,%1,%p2\;addze. %3,%3
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "exact_log2_cint_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 3 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 3)
- (div:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 0)
- (compare:CC (match_dup 3)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "exact_log2_cint_operand" "N,N"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (div:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- sradi %0,%1,%p2\;addze. %0,%0
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "exact_log2_cint_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (div:DI (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0)
- (div:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (div:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "gpc_reg_operand" "r")))]
- "TARGET_POWERPC64"
- "divd %0,%1,%2"
- [(set_attr "type" "ldiv")])
-
-(define_insn "udivdi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (udiv:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "gpc_reg_operand" "r")))]
- "TARGET_POWERPC64"
- "divdu %0,%1,%2"
- [(set_attr "type" "ldiv")])
-
(define_insn "rotldi3"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
@@ -6604,9 +6901,8 @@
(ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "ri")))]
"TARGET_POWERPC64"
- "sld%I2 %0,%1,%H2"
- [(set_attr "length" "8")])
-
+ "sld%I2 %0,%1,%H2")
+
(define_insn "*ashldi3_internal2"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
(compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
@@ -6619,7 +6915,7 @@
#"
[(set_attr "type" "delayed_compare")
(set_attr "length" "4,8")])
-
+
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
(compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
@@ -6683,7 +6979,7 @@
"@
rldic. %4,%1,%H2,%W3
#"
- [(set_attr "type" "delayed_compare")
+ [(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
@@ -6717,7 +7013,7 @@
"@
rldic. %0,%1,%H2,%W3
#"
- [(set_attr "type" "delayed_compare")
+ [(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
@@ -6759,7 +7055,7 @@
"@
rldicr. %4,%1,%H2,%S3
#"
- [(set_attr "type" "delayed_compare")
+ [(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
@@ -6793,7 +7089,7 @@
"@
rldicr. %0,%1,%H2,%S3
#"
- [(set_attr "type" "delayed_compare")
+ [(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_split
@@ -6984,18 +7280,20 @@
"")
(define_insn "anddi3"
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
- (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
- (match_operand:DI 2 "and64_2_operand" "?r,S,K,J,t")))
- (clobber (match_scratch:CC 3 "=X,X,x,x,X"))]
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "?r,S,T,K,J,t")))
+ (clobber (match_scratch:CC 3 "=X,X,X,x,x,X"))]
"TARGET_POWERPC64"
"@
and %0,%1,%2
rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
andi. %0,%1,%b2
andis. %0,%1,%u2
#"
- [(set_attr "length" "4,4,4,4,8")])
+ [(set_attr "type" "*,*,*,compare,compare,*")
+ (set_attr "length" "4,4,4,4,4,8")])
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand" "")
@@ -7004,6 +7302,7 @@
(clobber (match_scratch:CC 3 ""))]
"TARGET_POWERPC64
&& (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
&& !mask64_operand (operands[2], DImode)"
[(set (match_dup 0)
(and:DI (rotate:DI (match_dup 1)
@@ -7013,22 +7312,22 @@
(and:DI (rotate:DI (match_dup 0)
(match_dup 6))
(match_dup 7)))]
- "
{
build_mask64_2_operands (operands[2], &operands[4]);
-}")
+})
(define_insn "*anddi3_internal2"
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,?y,?y,??y,??y,?y")
- (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:DI 2 "and64_2_operand" "r,S,K,J,t,r,S,K,J,t"))
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
(const_int 0)))
- (clobber (match_scratch:DI 3 "=r,r,r,r,r,r,r,r,r,r"))
- (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,x,x,X"))]
+ (clobber (match_scratch:DI 3 "=r,r,r,r,r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
"TARGET_64BIT"
"@
and. %3,%1,%2
rldic%B2. %3,%1,0,%S2
+ rlwinm. %3,%1,0,%m2,%M2
andi. %3,%1,%b2
andis. %3,%1,%u2
#
@@ -7036,26 +7335,10 @@
#
#
#
+ #
#"
- [(set_attr "type" "compare,delayed_compare,compare,compare,delayed_compare,compare,compare,compare,compare,compare")
- (set_attr "length" "4,4,4,4,8,8,8,8,8,12")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "and64_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 3 ""))
- (clobber (match_scratch:CC 4 ""))]
- "TARGET_POWERPC64 && reload_completed"
- [(parallel [(set (match_dup 3)
- (and:DI (match_dup 1)
- (match_dup 2)))
- (clobber (match_dup 4))])
- (set (match_dup 0)
- (compare:CC (match_dup 3)
- (const_int 0)))]
- "")
+ [(set_attr "type" "compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
(define_split
[(set (match_operand:CC 0 "cc_reg_operand" "")
@@ -7064,8 +7347,9 @@
(const_int 0)))
(clobber (match_scratch:DI 3 ""))
(clobber (match_scratch:CC 4 ""))]
- "TARGET_POWERPC64 && reload_completed
+ "TARGET_64BIT && reload_completed
&& (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
&& !mask64_operand (operands[2], DImode)"
[(set (match_dup 3)
(and:DI (rotate:DI (match_dup 1)
@@ -7083,17 +7367,18 @@
}")
(define_insn "*anddi3_internal3"
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,x,?y,?y,??y,??y,?y")
- (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:DI 2 "and64_2_operand" "r,S,K,J,t,r,S,K,J,t"))
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
(const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r,r,r")
(and:DI (match_dup 1) (match_dup 2)))
- (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,x,x,X"))]
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
"TARGET_64BIT"
"@
and. %0,%1,%2
rldic%B2. %0,%1,0,%S2
+ rlwinm. %0,%1,0,%m2,%M2
andi. %0,%1,%b2
andis. %0,%1,%u2
#
@@ -7101,19 +7386,20 @@
#
#
#
+ #
#"
- [(set_attr "type" "compare,delayed_compare,compare,compare,delayed_compare,compare,compare,compare,compare,compare")
- (set_attr "length" "4,4,4,4,8,8,8,8,8,12")])
+ [(set_attr "type" "compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "and64_operand" ""))
+ (match_operand:DI 2 "and64_2_operand" ""))
(const_int 0)))
(set (match_operand:DI 0 "gpc_reg_operand" "")
(and:DI (match_dup 1) (match_dup 2)))
(clobber (match_scratch:CC 4 ""))]
- "TARGET_POWERPC64 && reload_completed"
+ "TARGET_64BIT && reload_completed"
[(parallel [(set (match_dup 0)
(and:DI (match_dup 1) (match_dup 2)))
(clobber (match_dup 4))])
@@ -7130,8 +7416,9 @@
(set (match_operand:DI 0 "gpc_reg_operand" "")
(and:DI (match_dup 1) (match_dup 2)))
(clobber (match_scratch:CC 4 ""))]
- "TARGET_POWERPC64 && reload_completed
+ "TARGET_64BIT && reload_completed
&& (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
&& !mask64_operand (operands[2], DImode)"
[(set (match_dup 0)
(and:DI (rotate:DI (match_dup 1)
@@ -7286,7 +7573,7 @@
(const_int 0)))]
"")
-;; Split a logical operation that we can't do in one insn into two insns,
+;; Split a logical operation that we can't do in one insn into two insns,
;; each of which does one 16-bit part. This is used by combine.
(define_split
@@ -7300,7 +7587,7 @@
"
{
rtx i3,i4;
-
+
if (GET_CODE (operands[2]) == CONST_DOUBLE)
{
HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]);
@@ -7314,10 +7601,10 @@
& (~ (HOST_WIDE_INT) 0xffff));
i4 = GEN_INT (INTVAL (operands[2]) & 0xffff);
}
- operands[4] = gen_rtx (GET_CODE (operands[3]), DImode,
- operands[1], i3);
- operands[5] = gen_rtx (GET_CODE (operands[3]), DImode,
- operands[0], i4);
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[1], i3);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[0], i4);
}")
(define_insn "*boolcdi3_internal1"
@@ -7454,41 +7741,6 @@
;; Now define ways of moving data around.
-;; Elf specific ways of loading addresses for non-PIC code.
-;; The output of this could be r0, but we make a very strong
-;; preference for a base register because it will usually
-;; be needed there.
-(define_insn "elf_high"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
- (high:SI (match_operand 1 "" "")))]
- "TARGET_ELF && ! TARGET_64BIT"
- "{liu|lis} %0,%1@ha")
-
-(define_insn "elf_low"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
- (match_operand 2 "" "")))]
- "TARGET_ELF && ! TARGET_64BIT"
- "@
- {cal|la} %0,%2@l(%1)
- {ai|addic} %0,%1,%K2")
-
-;; Mach-O PIC trickery.
-(define_insn "macho_high"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
- (high:SI (match_operand 1 "" "")))]
- "TARGET_MACHO && ! TARGET_64BIT"
- "{liu|lis} %0,ha16(%1)")
-
-(define_insn "macho_low"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
- (match_operand 2 "" "")))]
- "TARGET_MACHO && ! TARGET_64BIT"
- "@
- {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
- {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
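
The elf_high/elf_low and macho_high/macho_low patterns removed here all split an absolute address into a high-adjusted half and a signed low half, and the "cannot rely on ha16(low half)==ha16(high half)" remark in movdf_low below is the carry case where adding 4 crosses a 0x8000 boundary. A checked model of the @ha/@l (ha16/lo16) arithmetic:

#include <assert.h>
#include <stdint.h>

static int32_t ha (uint32_t addr)   /* high-adjusted upper half (@ha) */
{
  return (int32_t) ((addr + 0x8000u) >> 16);
}

static int16_t lo (uint32_t addr)   /* sign-extended lower half (@l) */
{
  return (int16_t) (addr & 0xffffu);
}

int main (void)
{
  uint32_t x = 0x12347ffcu;
  /* Round trip: (ha(x) << 16) + lo(x) == x for any address.  */
  assert ((uint32_t) (ha (x) << 16) + (int32_t) lo (x) == x);
  /* The two words of a double can straddle the boundary, so the
     second word's high half must be recomputed: 0x1234 vs 0x1235.  */
  assert (ha (x) != ha (x + 4));
  return 0;
}
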
-
;; Set up a register with a value from the GOT table
(define_expand "movsi_got"
@@ -7528,7 +7780,7 @@
;; Used by sched, shorten_branches and final when the GOT pseudo reg
;; didn't get allocated to a hard register.
-(define_split
+(define_split
[(set (match_operand:SI 0 "gpc_reg_operand" "")
(unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
(match_operand:SI 2 "memory_operand" "")]
@@ -7545,12 +7797,6 @@
;; do the load 16-bits at a time. We could do this by loading from memory,
;; and this is even supposed to be faster, but it is simpler not to get
;; integers in the TOC.
-(define_expand "movsi"
- [(set (match_operand:SI 0 "general_operand" "")
- (match_operand:SI 1 "any_operand" ""))]
- ""
- "{ rs6000_emit_move (operands[0], operands[1], SImode); DONE; }")
-
(define_insn "movsi_low"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
@@ -7560,90 +7806,8 @@
[(set_attr "type" "load")
(set_attr "length" "4")])
-(define_insn "movsi_low_st"
- [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
- (match_operand 2 "" "")))
- (match_operand:SI 0 "gpc_reg_operand" "r"))]
- "TARGET_MACHO && ! TARGET_64BIT"
- "{st|stw} %0,lo16(%2)(%1)"
- [(set_attr "type" "store")
- (set_attr "length" "4")])
-
-(define_insn "movdf_low"
- [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
- (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
- (match_operand 2 "" ""))))]
- "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
- "*
-{
- switch (which_alternative)
- {
- case 0:
- return \"lfd %0,lo16(%2)(%1)\";
- case 1:
- {
- rtx operands2[4];
- operands2[0] = operands[0];
- operands2[1] = operands[1];
- operands2[2] = operands[2];
- if (TARGET_POWERPC64 && TARGET_32BIT)
- /* Note, old assemblers didn't support relocation here. */
- return \"ld %0,lo16(%2)(%1)\";
- else
- {
- operands2[3] = gen_rtx_REG (SImode, RS6000_PIC_OFFSET_TABLE_REGNUM);
- output_asm_insn (\"{l|lwz} %0,lo16(%2)(%1)\", operands);
-#if TARGET_MACHO
- if (MACHO_DYNAMIC_NO_PIC_P)
- output_asm_insn (\"{liu|lis} %L0,ha16(%2+4)\", operands);
- else
- /* We cannot rely on ha16(low half)==ha16(high half), alas,
- although in practice it almost always is. */
- output_asm_insn (\"{cau|addis} %L0,%3,ha16(%2+4)\", operands2);
-#endif
- return (\"{l|lwz} %L0,lo16(%2+4)(%L0)\");
- }
- }
- default:
- abort();
- }
-}"
- [(set_attr "type" "load")
- (set_attr "length" "4,12")])
-
-(define_insn "movdf_low_st"
- [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
- (match_operand 2 "" "")))
- (match_operand:DF 0 "gpc_reg_operand" "f"))]
- "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
- "stfd %0,lo16(%2)(%1)"
- [(set_attr "type" "store")
- (set_attr "length" "4")])
-
-(define_insn "movsf_low"
- [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
- (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
- (match_operand 2 "" ""))))]
- "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
- "@
- lfs %0,lo16(%2)(%1)
- {l|lwz} %0,lo16(%2)(%1)"
- [(set_attr "type" "load")
- (set_attr "length" "4")])
-
-(define_insn "movsf_low_st"
- [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
- (match_operand 2 "" "")))
- (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
- "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
- "@
- stfs %0,lo16(%2)(%1)
- {st|stw} %0,lo16(%2)(%1)"
- [(set_attr "type" "store")
- (set_attr "length" "4")])
-
(define_insn "*movsi_internal1"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m,r,r,r,r,r,*q,*c*l,*h,*h")
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "=r,r,r,m,r,r,r,r,r,*q,*c*l,*h,*h")
(match_operand:SI 1 "input_operand" "r,U,m,r,I,L,n,R,*h,r,r,r,0"))]
"gpc_reg_operand (operands[0], SImode)
|| gpc_reg_operand (operands[1], SImode)"
@@ -7686,14 +7850,14 @@
FAIL;
}")
-(define_insn "*movsi_internal2"
+(define_insn "*mov<mode>_internal2"
[(set (match_operand:CC 2 "cc_reg_operand" "=y,x,?y")
- (compare:CC (match_operand:SI 1 "gpc_reg_operand" "0,r,r")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "0,r,r")
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
+ ""
"@
- {cmpi|cmpwi} %2,%0,0
+ {cmpi|cmp<wd>i} %2,%0,0
mr. %0,%1
#"
[(set_attr "type" "cmp,compare,cmp")
@@ -7701,22 +7865,16 @@
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "")
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "") (match_dup 1))]
- "TARGET_32BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "") (match_dup 1))]
+ "reload_completed"
[(set (match_dup 0) (match_dup 1))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
-(define_expand "movhi"
- [(set (match_operand:HI 0 "general_operand" "")
- (match_operand:HI 1 "any_operand" ""))]
- ""
- "{ rs6000_emit_move (operands[0], operands[1], HImode); DONE; }")
-
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
(match_operand:HI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
@@ -7733,11 +7891,11 @@
{cror 0,0,0|nop}"
[(set_attr "type" "*,load,store,*,mfjmpr,*,mtjmpr,*")])
-(define_expand "movqi"
- [(set (match_operand:QI 0 "general_operand" "")
- (match_operand:QI 1 "any_operand" ""))]
+(define_expand "mov<mode>"
+ [(set (match_operand:INT 0 "general_operand" "")
+ (match_operand:INT 1 "any_operand" ""))]
""
- "{ rs6000_emit_move (operands[0], operands[1], QImode); DONE; }")
+ "{ rs6000_emit_move (operands[0], operands[1], <MODE>mode); DONE; }")
(define_insn "*movqi_internal"
[(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
@@ -7765,42 +7923,44 @@
"")
(define_insn "*movcc_internal1"
- [(set (match_operand:CC 0 "nonimmediate_operand" "=y,x,?y,r,r,r,r,q,cl,r,m")
- (match_operand:CC 1 "nonimmediate_operand" "y,r,r,x,y,r,h,r,r,m,r"))]
+ [(set (match_operand:CC 0 "nonimmediate_operand" "=y,x,?y,y,r,r,r,r,r,q,cl,r,m")
+ (match_operand:CC 1 "general_operand" "y,r,r,O,x,y,r,I,h,r,r,m,r"))]
"register_operand (operands[0], CCmode)
|| register_operand (operands[1], CCmode)"
"@
mcrf %0,%1
mtcrf 128,%1
{rlinm|rlwinm} %1,%1,%F0,0xffffffff\;mtcrf %R0,%1\;{rlinm|rlwinm} %1,%1,%f0,0xffffffff
+ crxor %0,%0,%0
mfcr %0%Q1
mfcr %0%Q1\;{rlinm|rlwinm} %0,%0,%f1,0xf0000000
mr %0,%1
+ {lil|li} %0,%1
mf%1 %0
mt%0 %1
mt%0 %1
{l%U1%X1|lwz%U1%X1} %0,%1
{st%U0%U1|stw%U0%U1} %1,%0"
[(set (attr "type")
- (cond [(eq_attr "alternative" "0")
+ (cond [(eq_attr "alternative" "0,3")
(const_string "cr_logical")
(eq_attr "alternative" "1,2")
(const_string "mtcr")
- (eq_attr "alternative" "5,7")
+ (eq_attr "alternative" "6,7,9")
(const_string "integer")
- (eq_attr "alternative" "6")
- (const_string "mfjmpr")
(eq_attr "alternative" "8")
+ (const_string "mfjmpr")
+ (eq_attr "alternative" "10")
(const_string "mtjmpr")
- (eq_attr "alternative" "9")
+ (eq_attr "alternative" "11")
(const_string "load")
- (eq_attr "alternative" "10")
+ (eq_attr "alternative" "12")
(const_string "store")
(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
(const_string "mfcrf")
]
(const_string "mfcr")))
- (set_attr "length" "4,4,12,4,8,4,4,4,4,4,4")])
+ (set_attr "length" "4,4,12,4,4,8,4,4,4,4,4,4,4")])
;; For floating-point, we normally deal with the floating-point registers
;; unless -msoft-float is used. The sole exception is that parameter passing
@@ -7839,7 +7999,7 @@
}")
(define_insn "*movsf_hardfloat"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=!r,!r,m,f,f,m,!cl,!q,!r,!h,!r,!r")
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=!r,!r,m,f,f,m,*c*l,*q,!r,*h,!r,!r")
(match_operand:SF 1 "input_operand" "r,m,r,f,m,f,r,r,h,0,G,Fn"))]
"(gpc_reg_operand (operands[0], SFmode)
|| gpc_reg_operand (operands[1], SFmode))
@@ -7857,7 +8017,7 @@
{cror 0,0,0|nop}
#
#"
- [(set_attr "type" "*,load,store,fp,fpload,fpstore,*,mtjmpr,*,*,*,*")
+ [(set_attr "type" "*,load,store,fp,fpload,fpstore,mtjmpr,*,mfjmpr,*,*,*")
(set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,8")])
(define_insn "*movsf_softfloat"
@@ -7879,7 +8039,7 @@
#
#
{cror 0,0,0|nop}"
- [(set_attr "type" "*,mtjmpr,*,*,load,store,*,*,*,*,*,*")
+ [(set_attr "type" "*,mtjmpr,*,mfjmpr,load,store,*,*,*,*,*,*")
(set_attr "length" "4,4,4,4,4,4,4,4,4,4,8,4")])
@@ -7941,7 +8101,7 @@
(define_split
[(set (match_operand:DF 0 "gpc_reg_operand" "")
- (match_operand:DF 1 "easy_fp_constant" ""))]
+ (match_operand:DF 1 "const_double_operand" ""))]
"TARGET_POWERPC64 && reload_completed
&& ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
|| (GET_CODE (operands[0]) == SUBREG
@@ -7989,7 +8149,7 @@
switch (which_alternative)
{
default:
- abort ();
+ gcc_unreachable ();
case 0:
/* We normally copy the low-numbered register first. However, if
the first register operand 0 is the same as the second register
@@ -7999,7 +8159,7 @@
else
return \"mr %0,%1\;mr %L0,%L1\";
case 1:
- if (offsettable_memref_p (operands[1])
+ if (rs6000_offsettable_memref_p (operands[1])
|| (GET_CODE (operands[1]) == MEM
&& (GET_CODE (XEXP (operands[1], 0)) == LO_SUM
|| GET_CODE (XEXP (operands[1], 0)) == PRE_INC
@@ -8039,7 +8199,7 @@
}
}
case 2:
- if (offsettable_memref_p (operands[0])
+ if (rs6000_offsettable_memref_p (operands[0])
|| (GET_CODE (operands[0]) == MEM
&& (GET_CODE (XEXP (operands[0], 0)) == LO_SUM
|| GET_CODE (XEXP (operands[0], 0)) == PRE_INC
@@ -8068,13 +8228,13 @@
return \"#\";
}
}"
- [(set_attr "type" "*,load,store,fp,fpload,fpstore,*,*,*")
+ [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*")
(set_attr "length" "8,16,16,4,4,4,8,12,16")])
(define_insn "*movdf_softfloat32"
[(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
(match_operand:DF 1 "input_operand" "r,m,r,G,H,F"))]
- "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || TARGET_E500_SINGLE)
&& (gpc_reg_operand (operands[0], DFmode)
|| gpc_reg_operand (operands[1], DFmode))"
"*
@@ -8082,7 +8242,7 @@
switch (which_alternative)
{
default:
- abort ();
+ gcc_unreachable ();
case 0:
/* We normally copy the low-numbered register first. However, if
the first register operand 0 is the same as the second register of
@@ -8109,13 +8269,13 @@
return \"#\";
}
}"
- [(set_attr "type" "*,load,store,*,*,*")
+ [(set_attr "type" "two,load,store,*,*,*")
(set_attr "length" "8,8,8,8,12,16")])
; ld/std require word-aligned displacements -> 'Y' constraint.
; List Y->r and r->Y before r->r for reload.
(define_insn "*movdf_hardfloat64"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,!cl,!r,!h,!r,!r,!r")
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r")
(match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F"))]
"TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS
&& (gpc_reg_operand (operands[0], DFmode)
@@ -8133,7 +8293,7 @@
#
#
#"
- [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,*,*,*,*,*")
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*")
(set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")])
(define_insn "*movdf_softfloat64"
@@ -8152,23 +8312,22 @@
#
#
{cror 0,0,0|nop}"
- [(set_attr "type" "load,store,*,*,*,*,*,*,*")
+ [(set_attr "type" "load,store,*,mtjmpr,mfjmpr,*,*,*,*")
(set_attr "length" "4,4,4,4,4,8,12,16,4")])
(define_expand "movtf"
[(set (match_operand:TF 0 "general_operand" "")
(match_operand:TF 1 "any_operand" ""))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
- && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128"
"{ rs6000_emit_move (operands[0], operands[1], TFmode); DONE; }")
; It's important to list the o->f and f->o moves before f->f because
; otherwise reload, given m->f, will try to pick f->f and reload it,
-; which doesn't make progress.
+; which doesn't make progress. Likewise r->Y must be before r->r.
(define_insn_and_split "*movtf_internal"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,f,rm,r")
- (match_operand:TF 1 "input_operand" "f,o,f,r,mGHF"))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,f,r,Y,r")
+ (match_operand:TF 1 "input_operand" "f,o,f,YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128
&& (gpc_reg_operand (operands[0], TFmode)
|| gpc_reg_operand (operands[1], TFmode))"
@@ -8176,23 +8335,39 @@
"&& reload_completed"
[(pc)]
{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
- [(set_attr "length" "8,8,8,20,20")])
+ [(set_attr "length" "8,8,8,20,20,16")])
+
+(define_insn_and_split "*movtf_softfloat"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,Y,r")
+ (match_operand:TF 1 "input_operand" "YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "20,20,16")])
(define_expand "extenddftf2"
[(parallel [(set (match_operand:TF 0 "nonimmediate_operand" "")
(float_extend:TF (match_operand:DF 1 "input_operand" "")))
(use (match_dup 2))])]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
{
operands[2] = CONST0_RTX (DFmode);
+ /* Generate GOT reference early for SVR4 PIC. */
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ operands[2] = validize_mem (force_const_mem (DFmode, operands[2]));
})
(define_insn_and_split "*extenddftf2_internal"
[(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,&f,r")
(float_extend:TF (match_operand:DF 1 "input_operand" "fr,mf,mf,rmGHF")))
- (use (match_operand:DF 2 "input_operand" "rf,m,f,n"))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ (use (match_operand:DF 2 "zero_reg_mem_operand" "rf,m,f,n"))]
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"#"
"&& reload_completed"
@@ -8205,12 +8380,12 @@
emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word),
operands[2]);
DONE;
-})
+})
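
All the TFmode patterns in this region, now gated on !TARGET_IEEEQUAD, operate on the IBM extended format: a long double is a pair of doubles whose sum is the value, with hi carrying the value rounded to double precision and lo the residual. That is why extenddftf2 just pairs the input with the zero in operands[2], and why the -mxl-compat trunctfdf2 further down is literally fadd %0,%1,%L1. A structural sketch in C:

typedef struct { double hi, lo; } ibm128;   /* double-double layout */

ibm128 extend_df_tf (double x)
{
  ibm128 r = { x, 0.0 };     /* exact: x + 0.0 == x */
  return r;
}

double trunc_tf_df (ibm128 t)
{
  return t.hi + t.lo;        /* one rounding back down to double */
}
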
(define_expand "extendsftf2"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(float_extend:TF (match_operand:SF 1 "gpc_reg_operand" "")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
{
rtx tmp = gen_reg_rtx (DFmode);
@@ -8222,14 +8397,14 @@
(define_expand "trunctfdf2"
[(set (match_operand:DF 0 "gpc_reg_operand" "")
(float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"")
(define_insn_and_split "trunctfdf2_internal1"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f")
(float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "0,f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && !TARGET_XL_COMPAT
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"@
#
@@ -8245,7 +8420,7 @@
(define_insn "trunctfdf2_internal2"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && TARGET_XL_COMPAT
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"fadd %0,%1,%L1"
[(set_attr "type" "fp")])
@@ -8254,7 +8429,7 @@
[(set (match_operand:SF 0 "gpc_reg_operand" "=f")
(float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "f")))
(clobber (match_scratch:DF 2 "=f"))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"#"
"&& reload_completed"
@@ -8265,9 +8440,9 @@
"")
(define_expand "floatsitf2"
- [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
- (float:TF (match_operand:SI 1 "gpc_reg_operand" "r")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (float:TF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
{
rtx tmp = gen_reg_rtx (DFmode);
@@ -8295,7 +8470,7 @@
(clobber (match_dup 3))
(clobber (match_dup 4))
(clobber (match_dup 5))])]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& (TARGET_POWER2 || TARGET_POWERPC)
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
{
@@ -8312,31 +8487,28 @@
(clobber (match_operand:DF 3 "gpc_reg_operand" "=&f"))
(clobber (match_operand:DI 4 "gpc_reg_operand" "=f"))
(clobber (match_operand:DI 5 "memory_operand" "=o"))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"#"
- "&& reload_completed"
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[5]))"
[(pc)]
{
rtx lowword;
emit_insn (gen_fix_trunc_helper (operands[2], operands[1], operands[3]));
- if (GET_CODE (operands[5]) != MEM)
- abort();
- lowword = XEXP (operands[5], 0);
- if (WORDS_BIG_ENDIAN)
- lowword = plus_constant (lowword, 4);
+ gcc_assert (MEM_P (operands[5]));
+ lowword = adjust_address (operands[5], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
emit_insn (gen_fctiwz (operands[4], operands[2]));
emit_move_insn (operands[5], operands[4]);
- emit_move_insn (operands[0], gen_rtx_MEM (SImode, lowword));
+ emit_move_insn (operands[0], lowword);
DONE;
})
(define_insn "negtf2"
[(set (match_operand:TF 0 "gpc_reg_operand" "=f")
(neg:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"*
{
@@ -8351,7 +8523,7 @@
(define_expand "abstf2"
[(set (match_operand:TF 0 "gpc_reg_operand" "=f")
(abs:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"
{
@@ -8371,7 +8543,7 @@
(label_ref (match_operand 2 "" ""))
(pc)))
(set (match_dup 6) (neg:DF (match_dup 6)))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"
{
@@ -8385,43 +8557,24 @@
;; Next come the multi-word integer load and store and the load and store
;; multiple insns.
-(define_expand "movdi"
- [(set (match_operand:DI 0 "general_operand" "")
- (match_operand:DI 1 "any_operand" ""))]
- ""
- "{ rs6000_emit_move (operands[0], operands[1], DImode); DONE; }")
+; List r->r after r->"o<>", otherwise reload will try to reload a
+; non-offsettable address by using r->r which won't make progress.
(define_insn "*movdi_internal32"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,f,f,m,r,r,r,r,r")
- (match_operand:DI 1 "input_operand" "r,m,r,f,m,f,IJK,n,G,H,F"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,r,r,*f,*f,m,r")
+ (match_operand:DI 1 "input_operand" "r,r,m,f,m,f,IJKnGHF"))]
"! TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], DImode)
|| gpc_reg_operand (operands[1], DImode))"
- "*
-{
- switch (which_alternative)
- {
- default:
- abort ();
- case 0:
- case 1:
- case 2:
- return \"#\";
- case 3:
- return \"fmr %0,%1\";
- case 4:
- return \"lfd%U1%X1 %0,%1\";
- case 5:
- return \"stfd%U0%X0 %1,%0\";
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- return \"#\";
- }
-}"
- [(set_attr "type" "*,load,store,fp,fpload,fpstore,*,*,*,*,*")])
+ "@
+ #
+ #
+ #
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ #"
+ [(set_attr "type" "load,*,store,fp,fpload,fpstore,*")])
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand" "")
@@ -8447,47 +8600,21 @@
(define_split
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(match_operand:DI 1 "input_operand" ""))]
- "reload_completed && !TARGET_POWERPC64
+ "reload_completed && !TARGET_POWERPC64
&& gpr_or_gpr_p (operands[0], operands[1])"
[(pc)]
{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
-(define_split
- [(set (match_operand:TI 0 "gpc_reg_operand" "")
- (match_operand:TI 1 "const_double_operand" ""))]
- "TARGET_POWERPC64"
- [(set (match_dup 2) (match_dup 4))
- (set (match_dup 3) (match_dup 5))]
- "
-{
- operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
- TImode);
- operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
- TImode);
- if (GET_CODE (operands[1]) == CONST_DOUBLE)
- {
- operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
- operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
- }
- else if (GET_CODE (operands[1]) == CONST_INT)
- {
- operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
- operands[5] = operands[1];
- }
- else
- FAIL;
-}")
-
(define_insn "*movdi_internal64"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=Y,r,r,r,r,r,r,??f,f,m,r,*h,*h")
- (match_operand:DI 1 "input_operand" "r,Y,r,I,L,nF,R,f,m,f,*h,r,0"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,*f,*f,m,r,*h,*h")
+ (match_operand:DI 1 "input_operand" "r,m,r,I,L,nF,R,f,m,f,*h,r,0"))]
"TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], DImode)
|| gpc_reg_operand (operands[1], DImode))"
"@
- std%U0%X0 %1,%0
- ld%U1%X1 %0,%1
mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
li %0,%1
lis %0,%v1
#
@@ -8498,7 +8625,7 @@
mf%1 %0
mt%0 %1
{cror 0,0,0|nop}"
- [(set_attr "type" "store,load,*,*,*,*,*,fp,fpload,fpstore,mfjmpr,mtjmpr,*")
+ [(set_attr "type" "*,load,store,*,*,*,*,fp,fpload,fpstore,mfjmpr,mtjmpr,*")
(set_attr "length" "4,4,4,4,4,20,4,4,4,4,4,4,4")])
;; immediate value valid for a single instruction hiding in a const_double
@@ -8561,41 +8688,10 @@
else
FAIL;
}")
-
-(define_insn "*movdi_internal2"
- [(set (match_operand:CC 2 "cc_reg_operand" "=y,x,?y")
- (compare:CC (match_operand:DI 1 "gpc_reg_operand" "0,r,r")
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
- "TARGET_64BIT"
- "@
- cmpdi %2,%0,0
- mr. %0,%1
- #"
- [(set_attr "type" "cmp,compare,cmp")
- (set_attr "length" "4,4,8")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC (match_operand:DI 1 "gpc_reg_operand" "")
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "") (match_dup 1))]
- "TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 0) (match_dup 1))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
;; TImode is similar, except that we usually want to compute the address into
;; a register and use lsi/stsi (the exception is during reload). MQ is also
;; clobbered in stsi for POWER, so we need a SCRATCH for it.
-(define_expand "movti"
- [(parallel [(set (match_operand:TI 0 "general_operand" "")
- (match_operand:TI 1 "general_operand" ""))
- (clobber (scratch:SI))])]
- ""
- "{ rs6000_emit_move (operands[0], operands[1], TImode); DONE; }")
;; We say that MQ is clobbered in the last alternative because the first
;; alternative would never get used otherwise since it would need a reload
@@ -8604,17 +8700,17 @@
;; giving the SCRATCH mq.
(define_insn "*movti_power"
- [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r")
- (match_operand:TI 1 "reg_or_mem_operand" "r,r,r,Q,m"))
- (clobber (match_scratch:SI 2 "=q,q#X,X,X,X"))]
- "TARGET_POWER && ! TARGET_POWERPC64
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))
+ (clobber (match_scratch:SI 2 "=q,q#X,X,X,X,X"))]
+ "TARGET_POWER && ! TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
"*
{
switch (which_alternative)
{
default:
- abort ();
+ gcc_unreachable ();
case 0:
if (TARGET_STRING)
@@ -8630,14 +8726,15 @@
return \"{lsi|lswi} %0,%P1,16\";
/* ... fall through ... */
case 4:
+ case 5:
return \"#\";
}
}"
- [(set_attr "type" "store,store,*,load,load")])
+ [(set_attr "type" "store,store,*,load,load,*")])
(define_insn "*movti_string"
- [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r")
- (match_operand:TI 1 "reg_or_mem_operand" "r,r,r,Q,m"))]
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,o<>,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))]
"! TARGET_POWER && ! TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
"*
@@ -8645,7 +8742,7 @@
switch (which_alternative)
{
default:
- abort ();
+ gcc_unreachable ();
case 0:
if (TARGET_STRING)
return \"{stsi|stswi} %1,%P0,16\";
@@ -8655,28 +8752,52 @@
case 3:
/* If the address is not used in the output, we can use lsi. Otherwise,
fall through to generating four loads. */
- if (TARGET_STRING
+ if (TARGET_STRING
&& ! reg_overlap_mentioned_p (operands[0], operands[1]))
return \"{lsi|lswi} %0,%P1,16\";
/* ... fall through ... */
case 4:
+ case 5:
return \"#\";
}
}"
- [(set_attr "type" "store,store,*,load,load")])
+ [(set_attr "type" "store_ux,store_ux,*,load_ux,load_ux,*")])
(define_insn "*movti_ppc64"
- [(set (match_operand:TI 0 "nonimmediate_operand" "=r,m,r")
- (match_operand:TI 1 "input_operand" "r,r,o"))]
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o<>,r")
+ (match_operand:TI 1 "input_operand" "r,r,m"))]
"TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
|| gpc_reg_operand (operands[1], TImode))"
- "@
- #
- #
- #"
+ "#"
[(set_attr "type" "*,load,store")])
(define_split
+ [(set (match_operand:TI 0 "gpc_reg_operand" "")
+ (match_operand:TI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ TImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ TImode);
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
+ operands[5] = operands[1];
+ }
+ else
+ FAIL;
+}")
+
+(define_split
[(set (match_operand:TI 0 "nonimmediate_operand" "")
(match_operand:TI 1 "input_operand" ""))]
"reload_completed
@@ -8741,7 +8862,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 8"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_insn "*ldmsi7"
@@ -8763,7 +8884,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 7"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_insn "*ldmsi6"
@@ -8783,7 +8904,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 6"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_insn "*ldmsi5"
@@ -8801,7 +8922,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 5"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_insn "*ldmsi4"
@@ -8817,7 +8938,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 4"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_insn "*ldmsi3"
@@ -8831,7 +8952,7 @@
"TARGET_STRING && XVECLEN (operands[0], 0) == 3"
"*
{ return rs6000_output_load_multiple (operands); }"
- [(set_attr "type" "load")
+ [(set_attr "type" "load_ux")
(set_attr "length" "32")])
(define_expand "store_multiple"
@@ -8878,20 +8999,11 @@
gen_rtx_REG (SImode, regno + i));
}")
-(define_insn "*store_multiple_power"
- [(match_parallel 0 "store_multiple_operation"
- [(set (match_operand:SI 1 "indirect_operand" "=Q")
- (match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "=q"))])]
- "TARGET_STRING && TARGET_POWER"
- "{stsi|stswi} %2,%P1,%O0"
- [(set_attr "type" "store")])
-
(define_insn "*stmsi8"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
@@ -8908,13 +9020,13 @@
(match_operand:SI 10 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 9"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
(define_insn "*stmsi7"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
@@ -8929,13 +9041,13 @@
(match_operand:SI 9 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 8"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
(define_insn "*stmsi6"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
@@ -8948,13 +9060,13 @@
(match_operand:SI 8 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 7"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
(define_insn "*stmsi5"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
@@ -8965,13 +9077,13 @@
(match_operand:SI 7 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 6"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
(define_insn "*stmsi4"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
@@ -8980,28 +9092,154 @@
(match_operand:SI 6 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 5"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
(define_insn "*stmsi3"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
(match_operand:SI 2 "gpc_reg_operand" "r"))
- (clobber (match_scratch:SI 3 "X"))
+ (clobber (match_scratch:SI 3 "=X"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
(match_operand:SI 4 "gpc_reg_operand" "r"))
(set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
(match_operand:SI 5 "gpc_reg_operand" "r"))])]
"TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 4"
"{stsi|stswi} %2,%1,%O0"
- [(set_attr "type" "store")])
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi8_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 28)))
+ (match_operand:SI 10 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 9"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi7_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 8"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi6_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 7"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi5_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 6"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi4_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 5"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi3_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 4"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+(define_expand "setmemsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 3 "" ""))])]
+ ""
+ "
+{
+  /* If the value to set is not zero, use the library routine.  */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (expand_block_clear (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
;; String/block move insn.
;; Argument 0 is the destination
;; Argument 1 is the source
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(use (match_operand:SI 2 "" ""))
@@ -9018,7 +9256,7 @@
;; Move up to 32 bytes at a time. The fixed registers are needed because the
;; register allocator doesn't have a clue about allocating 8 word registers.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_8reg"
+(define_expand "movmemsi_8reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
@@ -9056,12 +9294,12 @@
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
(define_insn ""
- [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
(use (match_operand:SI 2 "immediate_operand" "i"))
(use (match_operand:SI 3 "immediate_operand" "i"))
(clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
@@ -9072,7 +9310,7 @@
(clobber (reg:SI 10))
(clobber (reg:SI 11))
(clobber (reg:SI 12))
- (clobber (match_scratch:SI 5 "X"))]
+ (clobber (match_scratch:SI 5 "=X"))]
"TARGET_STRING && ! TARGET_POWER
&& ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
|| INTVAL (operands[2]) == 0)
@@ -9080,37 +9318,13 @@
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
- (set_attr "length" "8")])
-
-(define_insn ""
- [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
- (use (match_operand:SI 2 "immediate_operand" "i"))
- (use (match_operand:SI 3 "immediate_operand" "i"))
- (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
- (clobber (reg:SI 6))
- (clobber (reg:SI 7))
- (clobber (reg:SI 8))
- (clobber (reg:SI 9))
- (clobber (reg:SI 10))
- (clobber (reg:SI 11))
- (clobber (reg:SI 12))
- (clobber (match_scratch:SI 5 "X"))]
- "TARGET_STRING && TARGET_POWERPC64
- && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
- || INTVAL (operands[2]) == 0)
- && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
- && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
- && REGNO (operands[4]) == 5"
- "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
;; Move up to 24 bytes at a time. The fixed registers are needed because the
;; register allocator doesn't have a clue about allocating 6 word registers.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_6reg"
+(define_expand "movmemsi_6reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
@@ -9143,12 +9357,12 @@
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
(define_insn ""
- [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
(use (match_operand:SI 2 "immediate_operand" "i"))
(use (match_operand:SI 3 "immediate_operand" "i"))
(clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
@@ -9157,41 +9371,20 @@
(clobber (reg:SI 8))
(clobber (reg:SI 9))
(clobber (reg:SI 10))
- (clobber (match_scratch:SI 5 "X"))]
+ (clobber (match_scratch:SI 5 "=X"))]
"TARGET_STRING && ! TARGET_POWER
&& INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
&& (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
- (set_attr "length" "8")])
-
-(define_insn ""
- [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
- (use (match_operand:SI 2 "immediate_operand" "i"))
- (use (match_operand:SI 3 "immediate_operand" "i"))
- (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
- (clobber (reg:SI 6))
- (clobber (reg:SI 7))
- (clobber (reg:SI 8))
- (clobber (reg:SI 9))
- (clobber (reg:SI 10))
- (clobber (match_scratch:SI 5 "X"))]
- "TARGET_STRING && TARGET_POWERPC64
- && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
- && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
- && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
- && REGNO (operands[4]) == 5"
- "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
;; Move up to 16 bytes at a time, using 4 fixed registers to avoid spill
;; problems with TImode.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_4reg"
+(define_expand "movmemsi_4reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
@@ -9220,49 +9413,30 @@
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
(define_insn ""
- [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
(use (match_operand:SI 2 "immediate_operand" "i"))
(use (match_operand:SI 3 "immediate_operand" "i"))
(clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
(clobber (reg:SI 6))
(clobber (reg:SI 7))
(clobber (reg:SI 8))
- (clobber (match_scratch:SI 5 "X"))]
+ (clobber (match_scratch:SI 5 "=X"))]
"TARGET_STRING && ! TARGET_POWER
&& INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
&& (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
&& (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
&& REGNO (operands[4]) == 5"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
- (set_attr "length" "8")])
-
-(define_insn ""
- [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
- (use (match_operand:SI 2 "immediate_operand" "i"))
- (use (match_operand:SI 3 "immediate_operand" "i"))
- (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
- (clobber (reg:SI 6))
- (clobber (reg:SI 7))
- (clobber (reg:SI 8))
- (clobber (match_scratch:SI 5 "X"))]
- "TARGET_STRING && TARGET_POWERPC64
- && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
- && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
- && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
- && REGNO (operands[4]) == 5"
- "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
;; Move up to 8 bytes at a time.
-(define_expand "movstrsi_2reg"
+(define_expand "movmemsi_2reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
@@ -9282,7 +9456,7 @@
"TARGET_STRING && TARGET_POWER && ! TARGET_POWERPC64
&& INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
(define_insn ""
@@ -9291,15 +9465,15 @@
(use (match_operand:SI 2 "immediate_operand" "i"))
(use (match_operand:SI 3 "immediate_operand" "i"))
(clobber (match_scratch:DI 4 "=&r"))
- (clobber (match_scratch:SI 5 "X"))]
+ (clobber (match_scratch:SI 5 "=X"))]
"TARGET_STRING && ! TARGET_POWER && ! TARGET_POWERPC64
&& INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
;; Move up to 4 bytes at a time.
-(define_expand "movstrsi_1reg"
+(define_expand "movmemsi_1reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
@@ -9319,35 +9493,21 @@
"TARGET_STRING && TARGET_POWER
&& INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
(define_insn ""
- [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
(use (match_operand:SI 2 "immediate_operand" "i"))
(use (match_operand:SI 3 "immediate_operand" "i"))
(clobber (match_scratch:SI 4 "=&r"))
- (clobber (match_scratch:SI 5 "X"))]
+ (clobber (match_scratch:SI 5 "=X"))]
"TARGET_STRING && ! TARGET_POWER
&& INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
"{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
+ [(set_attr "type" "store_ux")
(set_attr "length" "8")])
-
-(define_insn ""
- [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
- (mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
- (use (match_operand:SI 2 "immediate_operand" "i"))
- (use (match_operand:SI 3 "immediate_operand" "i"))
- (clobber (match_scratch:SI 4 "=&r"))
- (clobber (match_scratch:SI 5 "X"))]
- "TARGET_STRING && TARGET_POWERPC64
- && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
- "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
- [(set_attr "type" "load")
- (set_attr "length" "8")])
-
;; Define insns that do load or store with update. Some of these we can
;; get by using pre-decrement or pre-increment, but the hardware can also
@@ -9370,12 +9530,12 @@
ldu %3,%2(%0)"
[(set_attr "type" "load_ux,load_u")])
-(define_insn "movdi_update"
- [(set (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
- (match_operand:DI 2 "reg_or_aligned_short_operand" "r,I")))
+(define_insn "movdi_<mode>_update"
+ [(set (mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
+ (match_operand:P 2 "reg_or_aligned_short_operand" "r,I")))
(match_operand:DI 3 "gpc_reg_operand" "r,r"))
- (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
- (plus:DI (match_dup 1) (match_dup 2)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
+ (plus:P (match_dup 1) (match_dup 2)))]
"TARGET_POWERPC64 && TARGET_UPDATE"
"@
stdux %3,%0,%2
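
;; The <mode> placeholder comes from the P mode macro, this port's name
;; for the pointer mode (SI when TARGET_32BIT, DI when TARGET_64BIT), so
;; one pattern now covers both address widths, generating
;; movdi_si_update and movdi_di_update; the stack expander below
;; accordingly calls gen_movdi_di_update on 64-bit targets.
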
@@ -9578,170 +9738,214 @@
;; Peephole to convert two consecutive FP loads or stores into lfq/stfq.
-(define_peephole
- [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+(define_insn "*lfq_power2"
+ [(set (match_operand:V2DF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2DF 1 "memory_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "lfq%U1%X1 %0,%1")
+
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
(match_operand:DF 1 "memory_operand" ""))
- (set (match_operand:DF 2 "gpc_reg_operand" "=f")
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
(match_operand:DF 3 "memory_operand" ""))]
"TARGET_POWER2
&& TARGET_HARD_FLOAT && TARGET_FPRS
&& registers_ok_for_quad_peep (operands[0], operands[2])
- && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
- && addrs_ok_for_quad_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
- "lfq%U1%X1 %0,%1")
+ && mems_ok_for_quad_peep (operands[1], operands[3])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[1] = widen_memory_access (operands[1], V2DFmode, 0);
+ operands[0] = gen_rtx_REG (V2DFmode, REGNO (operands[0]));")
-(define_peephole
+(define_insn "*stfq_power2"
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (match_operand:V2DF 1 "gpc_reg_operand" "f"))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "stfq%U0%X0 %1,%0")
+
+
+(define_peephole2
[(set (match_operand:DF 0 "memory_operand" "")
- (match_operand:DF 1 "gpc_reg_operand" "f"))
+ (match_operand:DF 1 "gpc_reg_operand" ""))
(set (match_operand:DF 2 "memory_operand" "")
- (match_operand:DF 3 "gpc_reg_operand" "f"))]
+ (match_operand:DF 3 "gpc_reg_operand" ""))]
"TARGET_POWER2
&& TARGET_HARD_FLOAT && TARGET_FPRS
&& registers_ok_for_quad_peep (operands[1], operands[3])
- && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
- && addrs_ok_for_quad_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
- "stfq%U0%X0 %1,%0")
+ && mems_ok_for_quad_peep (operands[0], operands[2])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[0] = widen_memory_access (operands[0], V2DFmode, 0);
+ operands[1] = gen_rtx_REG (V2DFmode, REGNO (operands[1]));")
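
;; Taken together, the two POWER2 peephole2s pair adjacent DFmode
;; accesses into one V2DF quad access.  As an illustrative example,
;;   lfd 10,0(9)
;;   lfd 11,8(9)
;; becomes
;;   lfq 10,0(9)
;; when the FPRs form the required pair (registers_ok_for_quad_peep) and
;; the memory references are adjacent (mems_ok_for_quad_peep);
;; widen_memory_access widens the first reference to the 16-byte access.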
+
+;; After inserting conditional returns we can sometimes have
+;; unnecessary register moves. Unfortunately we cannot have a
+;; modeless peephole here, because some single SImode sets have early
+;; clobber outputs. Although those sets expand to multi-ppc-insn
+;; sequences, using get_attr_length here will smash the operands
+;; array.  Neither is there an early_clobber_p predicate.
+;; Disallow subregs for E500 so we don't munge frob_di_df_2.
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "any_operand" ""))
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "!(TARGET_E500_DOUBLE && GET_CODE (operands[2]) == SUBREG)
+ && peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
+
+(define_peephole2
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "any_operand" ""))
+ (set (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
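
;; As a sketch of what these two peepholes do, the dead intermediate
;; copy in
;;   (set (reg:DF 120) (mem:DF ...))
;;   (set (reg:DF 121) (reg:DF 120))
;; collapses to
;;   (set (reg:DF 121) (mem:DF ...))
;; whenever peep2_reg_dead_p shows the first register dead after the
;; second insn.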
+
;; TLS support.
;; "b" output constraint here and on tls_ld to support tls linker optimization.
(define_insn "tls_gd_32"
- [(set (match_operand:SI 0 "register_operand" "=b")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGD))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%2@got@tlsgd")
(define_insn "tls_gd_64"
- [(set (match_operand:DI 0 "register_operand" "=b")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGD))]
"HAVE_AS_TLS && TARGET_64BIT"
"addi %0,%1,%2@got@tlsgd")
(define_insn "tls_ld_32"
- [(set (match_operand:SI 0 "register_operand" "=b")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")]
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")]
UNSPEC_TLSLD))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%&@got@tlsld")
(define_insn "tls_ld_64"
- [(set (match_operand:DI 0 "register_operand" "=b")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")]
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")]
UNSPEC_TLSLD))]
"HAVE_AS_TLS && TARGET_64BIT"
"addi %0,%1,%&@got@tlsld")
(define_insn "tls_dtprel_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPREL))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%2@dtprel")
(define_insn "tls_dtprel_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPREL))]
"HAVE_AS_TLS && TARGET_64BIT"
"addi %0,%1,%2@dtprel")
(define_insn "tls_dtprel_ha_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELHA))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addis %0,%1,%2@dtprel@ha")
(define_insn "tls_dtprel_ha_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELHA))]
"HAVE_AS_TLS && TARGET_64BIT"
"addis %0,%1,%2@dtprel@ha")
(define_insn "tls_dtprel_lo_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELLO))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%2@dtprel@l")
(define_insn "tls_dtprel_lo_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELLO))]
"HAVE_AS_TLS && TARGET_64BIT"
"addi %0,%1,%2@dtprel@l")
(define_insn "tls_got_dtprel_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTDTPREL))]
"HAVE_AS_TLS && !TARGET_64BIT"
"lwz %0,%2@got@dtprel(%1)")
(define_insn "tls_got_dtprel_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTDTPREL))]
"HAVE_AS_TLS && TARGET_64BIT"
"ld %0,%2@got@dtprel(%1)")
(define_insn "tls_tprel_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPREL))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%2@tprel")
(define_insn "tls_tprel_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPREL))]
"HAVE_AS_TLS && TARGET_64BIT"
"addi %0,%1,%2@tprel")
(define_insn "tls_tprel_ha_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELHA))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addis %0,%1,%2@tprel@ha")
(define_insn "tls_tprel_ha_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELHA))]
"HAVE_AS_TLS && TARGET_64BIT"
"addis %0,%1,%2@tprel@ha")
(define_insn "tls_tprel_lo_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELLO))]
"HAVE_AS_TLS && !TARGET_64BIT"
"addi %0,%1,%2@tprel@l")
(define_insn "tls_tprel_lo_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELLO))]
"HAVE_AS_TLS && TARGET_64BIT"
@@ -9751,32 +9955,32 @@
;; optimization. The linker may edit the instructions emitted by a
;; tls_got_tprel/tls_tls pair to addis,addi.
(define_insn "tls_got_tprel_32"
- [(set (match_operand:SI 0 "register_operand" "=b")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL))]
"HAVE_AS_TLS && !TARGET_64BIT"
"lwz %0,%2@got@tprel(%1)")
(define_insn "tls_got_tprel_64"
- [(set (match_operand:DI 0 "register_operand" "=b")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL))]
"HAVE_AS_TLS && TARGET_64BIT"
"ld %0,%2@got@tprel(%1)")
(define_insn "tls_tls_32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTLS))]
"HAVE_AS_TLS && !TARGET_64BIT"
"add %0,%1,%2@tls")
(define_insn "tls_tls_64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "b")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTLS))]
"HAVE_AS_TLS && TARGET_64BIT"
@@ -9804,7 +10008,7 @@
if (current_function_limit_stack)
{
rtx available;
- available = expand_binop (Pmode, sub_optab,
+ available = expand_binop (Pmode, sub_optab,
stack_pointer_rtx, stack_limit_rtx,
NULL_RTX, 1, OPTAB_WIDEN);
emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
@@ -9824,7 +10028,7 @@
neg_op0 = GEN_INT (- INTVAL (operands[1]));
if (TARGET_UPDATE)
- emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update : gen_movdi_update))
+ emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update : gen_movdi_di_update))
(stack_pointer_rtx, stack_pointer_rtx, neg_op0, chain));
else
@@ -9859,52 +10063,58 @@
""
"DONE;")
+;; Adjust stack pointer (op0) to a new value (op1).
+;; First copy old stack backchain to new location, and ensure that the
+;; scheduler won't reorder the sp assignment before the backchain write.
(define_expand "restore_stack_block"
- [(use (match_operand 0 "register_operand" ""))
- (set (match_dup 2) (match_dup 3))
- (set (match_dup 0) (match_operand 1 "register_operand" ""))
- (set (match_dup 3) (match_dup 2))]
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 2))
+ (set (match_dup 5) (unspec:BLK [(match_dup 5)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 1 "register_operand" ""))]
""
"
{
operands[2] = gen_reg_rtx (Pmode);
- operands[3] = gen_rtx_MEM (Pmode, operands[0]);
+ operands[3] = gen_frame_mem (Pmode, operands[0]);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
+ operands[5] = gen_frame_mem (BLKmode, operands[0]);
}")
(define_expand "save_stack_nonlocal"
- [(match_operand 0 "memory_operand" "")
- (match_operand 1 "register_operand" "")]
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_operand 0 "memory_operand" "") (match_dup 3))
+ (set (match_dup 2) (match_operand 1 "register_operand" ""))]
""
"
{
- rtx temp = gen_reg_rtx (Pmode);
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
/* Copy the backchain to the first word, sp to the second. */
- emit_move_insn (temp, gen_rtx_MEM (Pmode, operands[1]));
- emit_move_insn (operand_subword (operands[0], 0, 0,
- (TARGET_32BIT ? DImode : TImode)),
- temp);
- emit_move_insn (operand_subword (operands[0], 1, 0, (TARGET_32BIT ? DImode : TImode)),
- operands[1]);
- DONE;
+ operands[0] = adjust_address_nv (operands[0], Pmode, 0);
+ operands[2] = adjust_address_nv (operands[0], Pmode, units_per_word);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
}")
(define_expand "restore_stack_nonlocal"
- [(match_operand 0 "register_operand" "")
- (match_operand 1 "memory_operand" "")]
+ [(set (match_dup 2) (match_operand 1 "memory_operand" ""))
+ (set (match_dup 3) (match_dup 4))
+ (set (match_dup 5) (match_dup 2))
+ (set (match_dup 6) (unspec:BLK [(match_dup 6)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "") (match_dup 3))]
""
"
{
- rtx temp = gen_reg_rtx (Pmode);
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
/* Restore the backchain from the first word, sp from the second. */
- emit_move_insn (temp,
- operand_subword (operands[1], 0, 0, (TARGET_32BIT ? DImode : TImode)));
- emit_move_insn (operands[0],
- operand_subword (operands[1], 1, 0,
- (TARGET_32BIT ? DImode : TImode)));
- emit_move_insn (gen_rtx_MEM (Pmode, operands[0]), temp);
- DONE;
+ operands[2] = gen_reg_rtx (Pmode);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[1] = adjust_address_nv (operands[1], Pmode, 0);
+ operands[4] = adjust_address_nv (operands[1], Pmode, units_per_word);
+ operands[5] = gen_frame_mem (Pmode, operands[3]);
+ operands[6] = gen_frame_mem (BLKmode, operands[0]);
}")
;; TOC register handling.
@@ -9960,7 +10170,8 @@
[(set (match_operand:SI 0 "register_operand" "=l")
(match_operand:SI 1 "immediate_operand" "s"))
(use (unspec [(match_dup 1)] UNSPEC_TOC))]
- "TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX
+ && (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
"bcl 20,31,%1\\n%1:"
[(set_attr "type" "branch")
(set_attr "length" "4")])
@@ -9983,24 +10194,22 @@
"{l|lwz} %0,%2-%3(%1)"
[(set_attr "type" "load")])
-(define_insn "load_macho_picbase"
- [(set (match_operand:SI 0 "register_operand" "=l")
- (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")]
- UNSPEC_LD_MPIC))]
- "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
- "bcl 20,31,%1\\n%1:"
- [(set_attr "type" "branch")
- (set_attr "length" "4")])
+(define_insn "load_toc_v4_PIC_3b"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (high:SI
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s")))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cau|addis} %0,%1,%2-%3@ha")
-(define_insn "macho_correct_pic"
+(define_insn "load_toc_v4_PIC_3c"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (unspec:SI [(match_operand:SI 2 "immediate_operand" "s")
- (match_operand:SI 3 "immediate_operand" "s")]
- UNSPEC_MPIC_CORRECT)))]
- "DEFAULT_ABI == ABI_DARWIN"
- "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
- [(set_attr "length" "8")])
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s"))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cal|addi} %0,%1,%2-%3@l")
;; If the TOC is shared over a translation unit, as happens with all
;; the kinds of PIC that we support, we need to restore the TOC
@@ -10035,6 +10244,25 @@
rs6000_emit_load_toc_table (FALSE);
DONE;
}")
+
+;; ELF-specific ways of loading addresses for non-PIC code.
+;; The output of this could be r0, but we make a very strong
+;; preference for a base register because it will usually
+;; be needed there.
+(define_insn "elf_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "{liu|lis} %0,%1@ha")
+
+(define_insn "elf_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "@
+ {cal|la} %0,%2@l(%1)
+ {ai|addic} %0,%1,%K2")
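
;; For example, materializing the address of a symbol sym pairs the two
;; patterns:
;;   {liu|lis} 9,sym@ha        ; elf_high
;;   {cal|la}  3,sym@l(9)      ; elf_low
;; where @ha is the high part adjusted for the sign of the low 16 bits,
;; so the two halves sum to the full 32-bit address.
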
;; A function pointer under AIX is a pointer to a data area whose first word
;; contains the actual address of the function, whose second word contains a
@@ -10146,11 +10374,30 @@
operands[0] = machopic_indirect_call_target (operands[0]);
#endif
- if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != CONST_INT)
- abort ();
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
operands[0] = XEXP (operands[0], 0);
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
+ && flag_pic
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (operands[0]))
+ {
+ rtx call;
+ rtvec tmp;
+
+ tmp = gen_rtvec (3,
+ gen_rtx_CALL (VOIDmode,
+ gen_rtx_MEM (SImode, operands[0]),
+ operands[1]),
+ gen_rtx_USE (VOIDmode, operands[2]),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ call = emit_call_insn (gen_rtx_PARALLEL (VOIDmode, tmp));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call), pic_offset_table_rtx);
+ DONE;
+ }
+
if (GET_CODE (operands[0]) != SYMBOL_REF
|| (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[0]))
|| (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[2]) & CALL_LONG) != 0))
@@ -10158,12 +10405,14 @@
if (INTVAL (operands[2]) & CALL_LONG)
operands[0] = rs6000_longcall_ref (operands[0]);
- if (DEFAULT_ABI == ABI_V4
- || DEFAULT_ABI == ABI_DARWIN)
- operands[0] = force_reg (Pmode, operands[0]);
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[0] = force_reg (Pmode, operands[0]);
+ break;
- else if (DEFAULT_ABI == ABI_AIX)
- {
+ case ABI_AIX:
/* AIX function pointers are really pointers to a three word
area. */
emit_call_insn (TARGET_32BIT
@@ -10174,9 +10423,10 @@
operands[0]),
operands[1]));
DONE;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
}
}")
@@ -10194,11 +10444,33 @@
operands[1] = machopic_indirect_call_target (operands[1]);
#endif
- if (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != CONST_INT)
- abort ();
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
operands[1] = XEXP (operands[1], 0);
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
+ && flag_pic
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (operands[1]))
+ {
+ rtx call;
+ rtvec tmp;
+
+ tmp = gen_rtvec (3,
+ gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_CALL (VOIDmode,
+ gen_rtx_MEM (SImode,
+ operands[1]),
+ operands[2])),
+ gen_rtx_USE (VOIDmode, operands[3]),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ call = emit_call_insn (gen_rtx_PARALLEL (VOIDmode, tmp));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call), pic_offset_table_rtx);
+ DONE;
+ }
+
if (GET_CODE (operands[1]) != SYMBOL_REF
|| (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[1]))
|| (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[3]) & CALL_LONG) != 0))
@@ -10206,12 +10478,14 @@
if (INTVAL (operands[3]) & CALL_LONG)
operands[1] = rs6000_longcall_ref (operands[1]);
- if (DEFAULT_ABI == ABI_V4
- || DEFAULT_ABI == ABI_DARWIN)
- operands[1] = force_reg (Pmode, operands[1]);
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[1] = force_reg (Pmode, operands[1]);
+ break;
- else if (DEFAULT_ABI == ABI_AIX)
- {
+ case ABI_AIX:
/* AIX function pointers are really pointers to a three word
area. */
emit_call_insn (TARGET_32BIT
@@ -10224,9 +10498,10 @@
operands[1]),
operands[2]));
DONE;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
}
}")
@@ -10323,13 +10598,13 @@
;; and < 0 if they were not.
(define_insn "*call_indirect_nonlocal_aix32"
- [(call (mem:SI (match_operand:SI 0 "register_operand" "cl"))
- (match_operand 1 "" "g"))
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
(use (reg:SI 2))
(use (reg:SI 11))
(set (reg:SI 2)
(mem:SI (plus:SI (reg:SI 1) (const_int 20))))
- (clobber (match_scratch:SI 2 "=l"))]
+ (clobber (match_scratch:SI 2 "=l,l"))]
"TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
"b%T0l\;{l|lwz} 2,20(1)"
[(set_attr "type" "jmpreg")
@@ -10348,13 +10623,13 @@
(set_attr "length" "8")])
(define_insn "*call_indirect_nonlocal_aix64"
- [(call (mem:SI (match_operand:DI 0 "register_operand" "cl"))
- (match_operand 1 "" "g"))
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
(use (reg:DI 2))
(use (reg:DI 11))
(set (reg:DI 2)
(mem:DI (plus:DI (reg:DI 1) (const_int 40))))
- (clobber (match_scratch:SI 2 "=l"))]
+ (clobber (match_scratch:SI 2 "=l,l"))]
"TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
"b%T0l\;ld 2,40(1)"
[(set_attr "type" "jmpreg")
@@ -10365,7 +10640,7 @@
(match_operand 1 "" "g"))
(use (match_operand:SI 2 "immediate_operand" "O"))
(clobber (match_scratch:SI 3 "=l"))]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
"bl %z0\;%."
@@ -10374,13 +10649,13 @@
(define_insn "*call_value_indirect_nonlocal_aix32"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "register_operand" "cl"))
- (match_operand 2 "" "g")))
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
(use (reg:SI 2))
(use (reg:SI 11))
(set (reg:SI 2)
(mem:SI (plus:SI (reg:SI 1) (const_int 20))))
- (clobber (match_scratch:SI 3 "=l"))]
+ (clobber (match_scratch:SI 3 "=l,l"))]
"TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
"b%T1l\;{l|lwz} 2,20(1)"
[(set_attr "type" "jmpreg")
@@ -10401,13 +10676,13 @@
(define_insn "*call_value_indirect_nonlocal_aix64"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:DI 1 "register_operand" "cl"))
- (match_operand 2 "" "g")))
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
(use (reg:DI 2))
(use (reg:DI 11))
(set (reg:DI 2)
(mem:DI (plus:DI (reg:DI 1) (const_int 40))))
- (clobber (match_scratch:SI 3 "=l"))]
+ (clobber (match_scratch:SI 3 "=l,l"))]
"TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
"b%T1l\;ld 2,40(1)"
[(set_attr "type" "jmpreg")
@@ -10419,7 +10694,7 @@
(match_operand 2 "" "g")))
(use (match_operand:SI 3 "immediate_operand" "O"))
(clobber (match_scratch:SI 4 "=l"))]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
"bl %z1\;%."
@@ -10432,11 +10707,11 @@
;; operands[2] is the value FUNCTION_ARG returns for the VOID argument
;; which indicates how to set cr1
-(define_insn "*call_indirect_nonlocal_sysv"
- [(call (mem:SI (match_operand:SI 0 "register_operand" "cl,cl"))
- (match_operand 1 "" "g,g"))
- (use (match_operand:SI 2 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 3 "=l,l"))]
+(define_insn "*call_indirect_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10448,11 +10723,11 @@
return "b%T0l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
-(define_insn "*call_nonlocal_sysv"
- [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s,s"))
+(define_insn "*call_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
(match_operand 1 "" "g,g"))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(clobber (match_scratch:SI 3 "=l,l"))]
@@ -10469,18 +10744,29 @@
#if TARGET_MACHO
return output_call(insn, operands, 0, 2);
#else
- return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z0@plt" : "bl %z0";
-#endif
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ /* The magic 32768 offset here and in the other sysv call insns
+ corresponds to the offset of r30 in .got2, as given by LCTOC1.
+ See sysv4.h:toc_section. */
+ return "bl %z0+32768@plt";
+ else
+ return "bl %z0@plt";
+ }
+ else
+ return "bl %z0";
+#endif
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
-(define_insn "*call_value_indirect_nonlocal_sysv"
+(define_insn "*call_value_indirect_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "register_operand" "cl,cl"))
- (match_operand 2 "" "g,g")))
- (use (match_operand:SI 3 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 4 "=l,l"))]
+ (call (mem:SI (match_operand:P 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10492,12 +10778,12 @@
return "b%T1l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
-(define_insn "*call_value_nonlocal_sysv"
+(define_insn "*call_value_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s,s"))
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
(match_operand 2 "" "g,g")))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(clobber (match_scratch:SI 4 "=l,l"))]
@@ -10514,8 +10800,16 @@
#if TARGET_MACHO
return output_call(insn, operands, 1, 3);
#else
- return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z1@plt" : "bl %z1";
-#endif
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return "bl %z1+32768@plt";
+ else
+ return "bl %z1@plt";
+ }
+ else
+ return "bl %z1";
+#endif
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
@@ -10563,8 +10857,8 @@
operands[0] = machopic_indirect_call_target (operands[0]);
#endif
- if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != CONST_INT)
- abort ();
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
operands[0] = XEXP (operands[0], 0);
operands[3] = gen_reg_rtx (SImode);
@@ -10677,7 +10971,7 @@
(use (match_operand:SI 2 "immediate_operand" "O"))
(use (match_operand:SI 3 "register_operand" "l"))
(return)]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
"b %z0"
@@ -10705,15 +10999,15 @@
(use (match_operand:SI 3 "immediate_operand" "O"))
(use (match_operand:SI 4 "register_operand" "l"))
(return)]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
"b %z1"
[(set_attr "type" "branch")
(set_attr "length" "4")])
-(define_insn "*sibcall_nonlocal_sysv"
- [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s,s"))
+(define_insn "*sibcall_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
(match_operand 1 "" ""))
(use (match_operand 2 "immediate_operand" "O,n"))
(use (match_operand:SI 3 "register_operand" "l,l"))
@@ -10729,7 +11023,15 @@
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn (\"creqv 6,6,6\", operands);
- return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z0@plt\" : \"b %z0\";
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return \"b %z0+32768@plt\";
+ else
+ return \"b %z0@plt\";
+ }
+ else
+ return \"b %z0\";
}"
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
@@ -10749,17 +11051,17 @@
operands[1] = machopic_indirect_call_target (operands[1]);
#endif
- if (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != CONST_INT)
- abort ();
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
operands[1] = XEXP (operands[1], 0);
operands[4] = gen_reg_rtx (SImode);
}")
-(define_insn "*sibcall_value_nonlocal_sysv"
+(define_insn "*sibcall_value_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s,s"))
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
(match_operand 2 "" "")))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(use (match_operand:SI 4 "register_operand" "l,l"))
@@ -10775,7 +11077,15 @@
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn (\"creqv 6,6,6\", operands);
- return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z1@plt\" : \"b %z1\";
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return \"b %z1+32768@plt\";
+ else
+ return \"b %z1@plt\";
+ }
+ else
+ return \"b %z1\";
}"
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
@@ -10803,10 +11113,10 @@
;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
;; insns, and branches. We store the operands of compares until we see
;; how it is used.
-(define_expand "cmpsi"
+(define_expand "cmp<mode>"
[(set (cc0)
- (compare (match_operand:SI 0 "gpc_reg_operand" "")
- (match_operand:SI 1 "reg_or_short_operand" "")))]
+ (compare (match_operand:GPR 0 "gpc_reg_operand" "")
+ (match_operand:GPR 1 "reg_or_short_operand" "")))]
""
"
{
@@ -10814,26 +11124,7 @@
this might be a logical operation. That insn doesn't exist. */
if (GET_CODE (operands[1]) == CONST_INT
&& INTVAL (operands[1]) < 0)
- operands[1] = force_reg (SImode, operands[1]);
-
- rs6000_compare_op0 = operands[0];
- rs6000_compare_op1 = operands[1];
- rs6000_compare_fp_p = 0;
- DONE;
-}")
-
-(define_expand "cmpdi"
- [(set (cc0)
- (compare (match_operand:DI 0 "gpc_reg_operand" "")
- (match_operand:DI 1 "reg_or_short_operand" "")))]
- "TARGET_POWERPC64"
- "
-{
- /* Take care of the possibility that operands[1] might be negative but
- this might be a logical operation. That insn doesn't exist. */
- if (GET_CODE (operands[1]) == CONST_INT
- && INTVAL (operands[1]) < 0)
- operands[1] = force_reg (DImode, operands[1]);
+ operands[1] = force_reg (<MODE>mode, operands[1]);
rs6000_compare_op0 = operands[0];
rs6000_compare_op1 = operands[1];
@@ -10841,35 +11132,10 @@
DONE;
}")
-(define_expand "cmpsf"
- [(set (cc0) (compare (match_operand:SF 0 "gpc_reg_operand" "")
- (match_operand:SF 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT"
- "
-{
- rs6000_compare_op0 = operands[0];
- rs6000_compare_op1 = operands[1];
- rs6000_compare_fp_p = 1;
- DONE;
-}")
-
-(define_expand "cmpdf"
- [(set (cc0) (compare (match_operand:DF 0 "gpc_reg_operand" "")
- (match_operand:DF 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT && TARGET_FPRS"
- "
-{
- rs6000_compare_op0 = operands[0];
- rs6000_compare_op1 = operands[1];
- rs6000_compare_fp_p = 1;
- DONE;
-}")
-
-(define_expand "cmptf"
- [(set (cc0) (compare (match_operand:TF 0 "gpc_reg_operand" "")
- (match_operand:TF 1 "gpc_reg_operand" "")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
- && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+(define_expand "cmp<mode>"
+ [(set (cc0) (compare (match_operand:FP 0 "gpc_reg_operand" "")
+ (match_operand:FP 1 "gpc_reg_operand" "")))]
+ ""
"
{
rs6000_compare_op0 = operands[0];
@@ -10930,12 +11196,12 @@
(define_expand "bunordered"
[(use (match_operand 0 "" ""))]
- ""
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
"{ rs6000_emit_cbranch (UNORDERED, operands[0]); DONE; }")
(define_expand "bordered"
[(use (match_operand 0 "" ""))]
- ""
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
"{ rs6000_emit_cbranch (ORDERED, operands[0]); DONE; }")
(define_expand "buneq"
@@ -10982,11 +11248,11 @@
[(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
""
"
-{
+{
if (! rs6000_compare_fp_p)
FAIL;
- rs6000_emit_sCOND (NE, operands[0]);
+ rs6000_emit_sCOND (NE, operands[0]);
DONE;
}")
@@ -10996,8 +11262,7 @@
""
"
{
- if (! rs6000_compare_fp_p
- && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
FAIL;
rs6000_emit_sCOND (GE, operands[0]);
@@ -11010,11 +11275,10 @@
""
"
{
- if (! rs6000_compare_fp_p
- && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
FAIL;
- rs6000_emit_sCOND (GT, operands[0]);
+ rs6000_emit_sCOND (GT, operands[0]);
DONE;
}")
@@ -11024,11 +11288,10 @@
""
"
{
- if (! rs6000_compare_fp_p
- && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
FAIL;
- rs6000_emit_sCOND (LE, operands[0]);
+ rs6000_emit_sCOND (LE, operands[0]);
DONE;
}")
@@ -11038,11 +11301,10 @@
""
"
{
- if (! rs6000_compare_fp_p
- && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
FAIL;
- rs6000_emit_sCOND (LT, operands[0]);
+ rs6000_emit_sCOND (LT, operands[0]);
DONE;
}")
@@ -11068,12 +11330,12 @@
(define_expand "sunordered"
[(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
- ""
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
"{ rs6000_emit_sCOND (UNORDERED, operands[0]); DONE; }")
(define_expand "sordered"
[(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
- ""
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
"{ rs6000_emit_sCOND (ORDERED, operands[0]); DONE; }")
(define_expand "suneq"
@@ -11106,51 +11368,138 @@
""
"{ rs6000_emit_sCOND (LTGT, operands[0]); DONE; }")
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
+ else
+ emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "stack_protect_setsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:SI 2 "=&r") (const_int 0))]
+ "TARGET_32BIT"
+ "{l%U1%X1|lwz%U1%X1} %2,%1\;{st%U0%X0|stw%U0%X0} %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "stack_protect_setdi"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:DI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:DI 2 "=&r") (const_int 0))]
+ "TARGET_64BIT"
+ "ld%U1%X1 %2,%1\;std%U0%X0 %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]),
+ UNSPEC_SP_TEST);
+ rs6000_compare_fp_p = 0;
+ emit_jump_insn (gen_beq (operands[2]));
+ DONE;
+})
+
+(define_insn "stack_protect_testsi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:SI 1 "memory_operand" "m,m")
+ (match_operand:SI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:SI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;{cmpl|cmplw} %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
+(define_insn "stack_protect_testdi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:DI 1 "memory_operand" "m,m")
+ (match_operand:DI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:DI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;cmpld %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
;; Here are the actual compare insns.
-(define_insn "*cmpsi_internal1"
+(define_insn "*cmp<mode>_internal1"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
- (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ (compare:CC (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")))]
""
- "{cmp%I2|cmpw%I2} %0,%1,%2"
- [(set_attr "type" "cmp")])
-
-(define_insn "*cmpdi_internal1"
- [(set (match_operand:CC 0 "cc_reg_operand" "=y")
- (compare:CC (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "reg_or_short_operand" "rI")))]
- "TARGET_POWERPC64"
- "cmpd%I2 %0,%1,%2"
+ "{cmp%I2|cmp<wd>%I2} %0,%1,%2"
[(set_attr "type" "cmp")])
;; If we are comparing a register for equality with a large constant,
-;; we can do this with an XOR followed by a compare. But we need a scratch
-;; register for the result of the XOR.
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_operand" "")
- (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "non_short_cint_operand" "")))
- (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
- "find_single_use (operands[0], insn, 0)
- && (GET_CODE (*find_single_use (operands[0], insn, 0)) == EQ
- || GET_CODE (*find_single_use (operands[0], insn, 0)) == NE)"
- [(set (match_dup 3) (xor:SI (match_dup 1) (match_dup 4)))
- (set (match_dup 0) (compare:CC (match_dup 3) (match_dup 5)))]
- "
+;; we can do this with an XOR followed by a compare. But this is profitable
+;; only if the large constant is only used for the comparison (and in this
+;; case we already have a register to reuse as scratch).
+;;
+;; For 64-bit registers, we could only do so if the constant's bit 15 is clear:
+;; otherwise we'd need to XOR with FFFFFFFF????0000 which is not available.
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "logical_const_operand" ""))
+ (set (match_dup 0) (match_operator:SI 3 "boolean_or_operator"
+ [(match_dup 0)
+ (match_operand:SI 2 "logical_const_operand" "")]))
+ (set (match_operand:CC 4 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 5 "gpc_reg_operand" "")
+ (match_dup 0)))
+ (set (pc)
+ (if_then_else (match_operator 6 "equality_operator"
+ [(match_dup 4) (const_int 0)])
+ (match_operand 7 "" "")
+ (match_operand 8 "" "")))]
+ "peep2_reg_dead_p (3, operands[0])
+ && peep2_reg_dead_p (4, operands[4])"
+ [(set (match_dup 0) (xor:SI (match_dup 5) (match_dup 9)))
+ (set (match_dup 4) (compare:CC (match_dup 0) (match_dup 10)))
+ (set (pc) (if_then_else (match_dup 6) (match_dup 7) (match_dup 8)))]
+
{
- /* Get the constant we are comparing against, C, and see what it looks like
- sign-extended to 16 bits. Then see what constant could be XOR'ed
- with C to get the sign-extended value. */
-
- HOST_WIDE_INT c = INTVAL (operands[2]);
+ /* Get the constant we are comparing against, and see what it looks like
+ when sign-extended from 16 to 32 bits. Then see what constant we could
+ XOR with SEXTC to get the sign-extended value. */
+ rtx cnst = simplify_const_binary_operation (GET_CODE (operands[3]),
+ SImode,
+ operands[1], operands[2]);
+ HOST_WIDE_INT c = INTVAL (cnst);
HOST_WIDE_INT sextc = ((c & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT xorv = c ^ sextc;
- operands[4] = GEN_INT (xorv);
- operands[5] = GEN_INT (sextc);
-}")
+ operands[9] = GEN_INT (xorv);
+ operands[10] = GEN_INT (sextc);
+})
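A worked example makes the new peephole's arithmetic concrete. For c = 0x12348765 the low halfword sign-extends to sextc = 0xffff8765, and xorv = c ^ sextc = 0xedcb0000, which a single xoris can apply; the remaining compare constant sextc then fits cmpwi's signed 16-bit immediate. The identity being exploited is (x == c) <=> ((x ^ xorv) == sextc). A standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main (void)
    {
      int32_t c = 0x12348765;                             /* example constant */
      int32_t sextc = ((c & 0xffff) ^ 0x8000) - 0x8000;   /* sign-extend low 16 */
      int32_t xorv = c ^ sextc;                           /* differs above bit 15 */

      assert ((uint32_t) sextc == 0xffff8765u);   /* fits a signed 16-bit field */
      assert ((uint32_t) xorv == 0xedcb0000u);    /* one xoris applies this */
      for (int32_t x = c - 2; x <= c + 2; x++)
        assert ((x == c) == ((x ^ xorv) == sextc));
      return 0;
    }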
(define_insn "*cmpsi_internal2"
[(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
@@ -11234,7 +11583,7 @@
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
(match_operand:TF 2 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && !TARGET_XL_COMPAT
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"fcmpu %0,%1,%2\;bne %0,$+8\;fcmpu %0,%L1,%L2"
[(set_attr "type" "fpcompare")
@@ -11252,7 +11601,7 @@
(clobber (match_scratch:DF 8 "=f"))
(clobber (match_scratch:DF 9 "=f"))
(clobber (match_scratch:DF 10 "=f"))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && TARGET_XL_COMPAT
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"#"
"&& reload_completed"
@@ -11290,14 +11639,12 @@
DFmode));
if (TARGET_TOC)
{
- operands[13] = gen_rtx_MEM (DFmode,
- create_TOC_reference (XEXP (operands[13], 0)));
- operands[14] = gen_rtx_MEM (DFmode,
- create_TOC_reference (XEXP (operands[14], 0)));
+ operands[13] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[13], 0)));
+ operands[14] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[14], 0)));
set_mem_alias_set (operands[13], get_TOC_alias_set ());
set_mem_alias_set (operands[14], get_TOC_alias_set ());
- RTX_UNCHANGING_P (operands[13]) = 1;
- RTX_UNCHANGING_P (operands[14]) = 1;
}
})
@@ -11320,16 +11667,16 @@
(const_string "mfcrf")
]
(const_string "mfcr")))
- (set_attr "length" "12")])
+ (set_attr "length" "8")])
-;; Same as above, but get the EQ bit.
-(define_insn "move_from_CR_eq_bit"
+;; Same as above, but get the GT bit.
+(define_insn "move_from_CR_gt_bit"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_EQ))]
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))]
"TARGET_E500"
- "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,1"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,31,31"
[(set_attr "type" "mfcr")
- (set_attr "length" "12")])
+ (set_attr "length" "8")])
;; Same as above, but get the OV/ORDERED bit.
(define_insn "move_from_CR_ov_bit"
@@ -11338,7 +11685,7 @@
"TARGET_ISEL"
"mfcr %0\;{rlinm|rlwinm} %0,%0,%t1,1"
[(set_attr "type" "mfcr")
- (set_attr "length" "12")])
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
@@ -11352,7 +11699,7 @@
(const_string "mfcrf")
]
(const_string "mfcr")))
- (set_attr "length" "12")])
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -11367,7 +11714,7 @@
mfcr %3%Q2\;{rlinm.|rlwinm.} %3,%3,%J1,1
#"
[(set_attr "type" "delayed_compare")
- (set_attr "length" "12,16")])
+ (set_attr "length" "8,16")])
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
@@ -11413,7 +11760,7 @@
(const_string "mfcrf")
]
(const_string "mfcr")))
- (set_attr "length" "12")])
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -11448,7 +11795,7 @@
return \"mfcr %4%Q2\;{rlinm.|rlwinm.} %4,%4,%5,%6,%6\";
}"
[(set_attr "type" "delayed_compare")
- (set_attr "length" "12,16")])
+ (set_attr "length" "8,16")])
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
@@ -11485,7 +11832,7 @@
"REGNO (operands[2]) != REGNO (operands[5])"
"mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
[(set_attr "type" "mfcr")
- (set_attr "length" "20")])
+ (set_attr "length" "12")])
(define_peephole
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
@@ -11499,7 +11846,7 @@
"TARGET_POWERPC64 && REGNO (operands[2]) != REGNO (operands[5])"
"mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
[(set_attr "type" "mfcr")
- (set_attr "length" "20")])
+ (set_attr "length" "12")])
;; There are some scc insns that can be done directly, without a compare.
;; These are faster because they don't involve the communications between
@@ -11515,117 +11862,96 @@
;; otherwise won't accept constants. We do this because it is faster than
;; the cmp/mfcr sequence we would otherwise generate.
-(define_insn ""
+(define_mode_attr scc_eq_op2 [(SI "rKLI")
+ (DI "rKJI")])
+
+(define_insn_and_split "*eq<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))]
+ "!TARGET_POWER"
+ "#"
+ "!TARGET_POWER"
+ [(set (match_dup 0)
+ (clz:GPR (match_dup 3)))
+ (set (match_dup 0)
+ (lshiftrt:GPR (match_dup 0) (match_dup 4)))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+
+ operands[4] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
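The rewritten eq patterns replace the old five-alternative subfic/adde forms with a count-leading-zeros idiom: x == 0 exactly when clz(x) equals the register width, so shifting the clz result right by log2(width) (5 for SImode, 6 for DImode) leaves the 0/1 answer in two instructions with no carry dependency. The 32-bit case in scalar form:

    #include <assert.h>
    #include <stdint.h>

    /* cntlzw %0,%1 ; srwi %0,%0,5.  cntlzw returns 32 for a zero input,
       which __builtin_clz leaves undefined, hence the explicit guard.  */
    static uint32_t eq0 (uint32_t x)
    {
      uint32_t clz = x ? (uint32_t) __builtin_clz (x) : 32;
      return clz >> 5;   /* 32 >> 5 == 1; any value in 0..31 shifts to 0 */
    }

    int main (void)
    {
      assert (eq0 (0) == 1 && eq0 (1) == 0 && eq0 (0x80000000u) == 0);
      return 0;
    }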
+
+(define_insn_and_split "*eq<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC
+ (eq:P (match_operand:P 1 "gpc_reg_operand" "=r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (eq:P (match_dup 1) (match_dup 2)))]
+ "!TARGET_POWER && optimize_size"
+ "#"
+ "!TARGET_POWER && optimize_size"
+ [(set (match_dup 0)
+ (clz:P (match_dup 4)))
+ (parallel [(set (match_dup 3)
+ (compare:CC (lshiftrt:P (match_dup 0) (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 0)
+ (lshiftrt:P (match_dup 0) (match_dup 5)))])]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[4] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[4] = operands[1];
+
+ operands[5] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
+
+(define_insn "*eqsi_power"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
(match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I")))
(clobber (match_scratch:SI 3 "=r,&r,r,r,r"))]
- "TARGET_32BIT"
+ "TARGET_POWER"
"@
xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
{sfi|subfic} %3,%1,0\;{ae|adde} %0,%3,%1
{xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
{xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
{sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0"
- [(set_attr "length" "12,8,12,12,12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
- (eq:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
- (match_operand:DI 2 "reg_or_cint_operand" "r,O,K,J,I")))
- (clobber (match_scratch:DI 3 "=r,&r,r,r,r"))]
- "TARGET_64BIT"
- "@
- xor %0,%1,%2\;subfic %3,%0,0\;adde %0,%3,%0
- subfic %3,%1,0\;adde %0,%3,%1
- xori %0,%1,%b2\;subfic %3,%0,0\;adde %0,%3,%0
- xoris %0,%1,%u2\;subfic %3,%0,0\;adde %0,%3,%0
- subfic %0,%1,%2\;subfic %3,%0,0\;adde %0,%3,%0"
- [(set_attr "length" "12,8,12,12,12")])
-
-(define_insn ""
- [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
- (compare:CC
- (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
- (eq:SI (match_dup 1) (match_dup 2)))
- (clobber (match_scratch:SI 3 "=r,&r,r,r,r,r,&r,r,r,r"))]
- "TARGET_32BIT"
- "@
- xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
- {sfi|subfic} %3,%1,0\;{ae.|adde.} %0,%3,%1
- {xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
- {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
- {sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
- #
- #
- #
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
-
-(define_split
- [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_cint_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (eq:SI (match_dup 1) (match_dup 2)))
- (clobber (match_scratch:SI 3 ""))]
- "TARGET_32BIT && reload_completed"
- [(parallel [(set (match_dup 0)
- (eq:SI (match_dup 1) (match_dup 2)))
- (clobber (match_dup 3))])
- (set (match_dup 4)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
- (compare:CC
- (eq:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:DI 2 "reg_or_cint_operand" "r,O,K,J,I,r,O,K,J,I"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
- (eq:DI (match_dup 1) (match_dup 2)))
- (clobber (match_scratch:DI 3 "=r,&r,r,r,r,r,&r,r,r,r"))]
- "TARGET_64BIT"
- "@
- xor %0,%1,%2\;subfic %3,%0,0\;adde. %0,%3,%0
- subfic %3,%1,0\;adde. %0,%3,%1
- xori %0,%1,%b2\;subfic %3,%0,0\;adde. %0,%3,%0
- xoris %0,%1,%u2\;subfic %3,%0,0\;adde. %0,%3,%0
- subfic %0,%1,%2\;subfic %3,%0,0\;adde. %0,%3,%0
- #
- #
- #
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
-
-(define_split
- [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (eq:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_cint_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (eq:DI (match_dup 1) (match_dup 2)))
- (clobber (match_scratch:DI 3 ""))]
- "TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
- (eq:DI (match_dup 1) (match_dup 2)))
- (clobber (match_dup 3))])
- (set (match_dup 4)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
;; We have insns of the form shown by the first define_insn below. If
;; there is something inside the comparison operation, we must split it.
@@ -11642,10 +11968,10 @@
(set (match_dup 2) (plus:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])
(match_dup 4)))])
-(define_insn ""
+(define_insn "*plus_eqsi"
[(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r")
(plus:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
- (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I"))
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I"))
(match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))]
"TARGET_32BIT"
"@
@@ -11654,18 +11980,19 @@
{xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
{xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
{sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
- [(set_attr "length" "12,8,12,12,12")])
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
-(define_insn ""
+(define_insn "*compare_plus_eqsi"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
(match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
(const_int 0)))
(clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
- "TARGET_32BIT"
+ "TARGET_32BIT && optimize_size"
"@
xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
{sfi|subfic} %4,%1,0\;{aze.|addze.} %4,%3
@@ -11685,11 +12012,11 @@
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 2 "scc_eq_operand" ""))
(match_operand:SI 3 "gpc_reg_operand" ""))
(const_int 0)))
(clobber (match_scratch:SI 4 ""))]
- "TARGET_32BIT && reload_completed"
+ "TARGET_32BIT && optimize_size && reload_completed"
[(set (match_dup 4)
(plus:SI (eq:SI (match_dup 1)
(match_dup 2))
@@ -11699,17 +12026,17 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "*plus_eqsi_compare"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
- (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
(match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r")
(plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT"
+ "TARGET_32BIT && optimize_size"
"@
xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
{sfi|subfic} %0,%1,0\;{aze.|addze.} %0,%3
@@ -11729,12 +12056,12 @@
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 2 "scc_eq_operand" ""))
(match_operand:SI 3 "gpc_reg_operand" ""))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "")
(plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT && reload_completed"
+ "TARGET_32BIT && optimize_size && reload_completed"
[(set (match_dup 0)
(plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
(set (match_dup 4)
@@ -11742,41 +12069,67 @@
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
- (neg:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
- (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I"))))]
- "TARGET_32BIT"
- "@
- xor %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
- {ai|addic} %0,%1,-1\;{sfe|subfe} %0,%0,%0
- {xoril|xori} %0,%1,%b2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
- {xoriu|xoris} %0,%1,%u2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
- {sfi|subfic} %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
- [(set_attr "length" "12,8,12,12,12")])
+(define_insn "*neg_eq0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))))]
+ ""
+ "{ai|addic} %0,%1,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*neg_eq<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "%r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (eq:P (match_dup 3) (const_int 0))))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+ })
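As in the *eq<mode> splits above, the general -(x == c) case is first reduced to a zero test: XOR when the constant is a logical_operand immediate, subtraction otherwise; both transforms map x == c to tmp == 0, so the pattern above finishes the job. A scalar sketch (the fits_logical test is only a stand-in for the real logical_operand predicate):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t neg_eq (uint32_t x, uint32_t c)
    {
      int fits_logical = (c <= 0xffffu);                 /* stand-in check */
      uint32_t tmp = fits_logical ? (x ^ c) : (x - c);   /* 0 iff x == c */
      uint32_t ca = (tmp != 0);
      return ca - 1u;                                    /* -(x == c) */
    }

    int main (void)
    {
      assert (neg_eq (5, 5) == 0xffffffffu && neg_eq (5, 6) == 0);
      assert (neg_eq (0x12345678u, 0x12345678u) == 0xffffffffu);
      return 0;
    }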
;; Simplify (ne X (const_int 0)) on the PowerPC.  No need to on the Power,
;; since the nabs/sr sequence there is just as fast.
-(define_insn "*ne0"
+(define_insn "*ne0si"
[(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
(lshiftrt:SI (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
(const_int 31)))
(clobber (match_scratch:SI 2 "=&r"))]
"! TARGET_POWER && TARGET_32BIT && !TARGET_ISEL"
"{ai|addic} %2,%1,-1\;{sfe|subfe} %0,%2,%1"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
-(define_insn ""
+(define_insn "*ne0di"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(lshiftrt:DI (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
(const_int 63)))
(clobber (match_scratch:DI 2 "=&r"))]
"TARGET_64BIT"
"addic %2,%1,-1\;subfe %0,%2,%1"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
;; This is what (plus (ne X (const_int 0)) Y) looks like.
-(define_insn ""
+(define_insn "*plus_ne0si"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (lshiftrt:SI
(neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
@@ -11785,9 +12138,10 @@
(clobber (match_scratch:SI 3 "=&r"))]
"TARGET_32BIT"
"{ai|addic} %3,%1,-1\;{aze|addze} %0,%2"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
-(define_insn ""
+(define_insn "*plus_ne0di"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(plus:DI (lshiftrt:DI
(neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
@@ -11796,9 +12150,10 @@
(clobber (match_scratch:DI 3 "=&r"))]
"TARGET_64BIT"
"addic %3,%1,-1\;addze %0,%2"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
-(define_insn ""
+(define_insn "*compare_plus_ne0si"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
(compare:CC
(plus:SI (lshiftrt:SI
@@ -11836,7 +12191,7 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "*compare_plus_ne0di"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
(compare:CC
(plus:DI (lshiftrt:DI
@@ -11871,7 +12226,7 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "*plus_ne0si_compare"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC
(plus:SI (lshiftrt:SI
@@ -11912,7 +12267,7 @@
(const_int 0)))]
"")
-(define_insn ""
+(define_insn "*plus_ne0di_compare"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC
(plus:DI (lshiftrt:DI
@@ -12090,62 +12445,24 @@
{ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{srai|srawi} %0,%0,31"
[(set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI")))]
- "TARGET_32BIT"
+(define_insn "*leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
"{sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (leu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "reg_or_short_operand" "rI")))]
- "TARGET_64BIT"
- "subf%I2c %0,%1,%2\;li %0,0\;adde %0,%0,%0"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC
- (leu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "reg_or_short_operand" "rI,rI"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (leu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- subf%I2c %0,%1,%2\;li %0,0\;adde. %0,%0,%0
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,16")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (leu:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (leu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT && reload_completed"
- [(set (match_dup 0)
- (leu:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
-(define_insn ""
+(define_insn "*leu<mode>_compare"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC
- (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (leu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ ""
"@
{sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
#"
@@ -12155,27 +12472,28 @@
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC
- (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (leu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
[(set (match_dup 0)
- (leu:SI (match_dup 1) (match_dup 2)))
+ (leu:P (match_dup 1) (match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
- (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI"))
- (match_operand:SI 3 "gpc_reg_operand" "r")))]
- "TARGET_32BIT"
+(define_insn "*plus_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
"{sf%I2|subf%I2c} %0,%1,%2\;{aze|addze} %0,%3"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -12242,23 +12560,25 @@
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (neg:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
- "TARGET_32BIT"
+(define_insn "*neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
"{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
- (and:SI (neg:SI
- (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI")))
- (match_operand:SI 3 "gpc_reg_operand" "r")))]
- "TARGET_32BIT"
+(define_insn "*and_neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (and:P (neg:P
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
"{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -12451,138 +12771,75 @@
"doz%I2 %0,%1,%2\;nabs %0,%0\;{srai|srawi} %0,%0,31"
[(set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))]
- "TARGET_32BIT"
- "@
- {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;neg %0,%0
- {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;neg %0,%0"
- [(set_attr "length" "12")])
+(define_insn_and_split "*ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
+ "")
-(define_insn ""
+(define_insn_and_split "*ltu<mode>_compare"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
(compare:CC
- (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
- (ltu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT"
- "@
- {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
- {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,12,16,16")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_neg_short_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (ltu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (ltu:SI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (ltu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
- (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
- (match_operand:SI 3 "reg_or_short_operand" "rI,rI")))]
- "TARGET_32BIT"
- "@
- {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3
- {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC
- (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
- (const_int 0)))
- (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
- "TARGET_32BIT"
- "@
- {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
- {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,12,16,16")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_neg_short_operand" ""))
- (match_operand:SI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:SI 4 ""))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 4)
- (plus:SI (ltu:SI (match_dup 1) (match_dup 2))
- (match_dup 3)))
- (set (match_dup 0)
- (compare:CC (match_dup 4)
- (const_int 0)))]
+(define_insn_and_split "*plus_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,r")
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "reg_or_short_operand" "rI,rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
"")
-(define_insn ""
+(define_insn_and_split "*plus_ltu<mode>_compare"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
(compare:CC
- (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
- (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT"
- "@
- {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;{sf.|subfc.} %0,%0,%3
- {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;{sf.|subfc.} %0,%0,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,12,16,16")])
-
-(define_split
- [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_neg_short_operand" ""))
- (match_operand:SI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (set (match_dup 4)
- (compare:CC (match_dup 0)
- (const_int 0)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (ltu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (neg:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))))]
- "TARGET_32BIT"
+(define_insn "*neg_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0
{ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
@@ -12709,35 +12966,26 @@
"doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
[(set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))]
- "TARGET_32BIT"
+(define_insn "*geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0
{ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (geu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "reg_or_neg_short_operand" "r,P")))]
- "TARGET_64BIT"
- "@
- subfc %0,%2,%1\;li %0,0\;adde %0,%0,%0
- addic %0,%1,%n2\;li %0,0\;adde %0,%0,%0"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
-(define_insn ""
+(define_insn "*geu<mode>_compare"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
(compare:CC
- (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
- (geu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT"
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
{ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
@@ -12749,62 +12997,30 @@
(define_split
[(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC
- (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_neg_short_operand" ""))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (geu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (geu:SI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC
- (geu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:DI 2 "reg_or_neg_short_operand" "r,P,r,P"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
- (geu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- subfc %0,%2,%1\;li %0,0\;adde. %0,%0,%0
- addic %0,%1,%n2\;li %0,0\;adde. %0,%0,%0
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,12,16,16")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (geu:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_neg_short_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (geu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT && reload_completed"
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
[(set (match_dup 0)
- (geu:DI (match_dup 1) (match_dup 2)))
+ (geu:P (match_dup 1) (match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
- (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r")))]
- "TARGET_32BIT"
+(define_insn "*plus_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (plus:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{aze|addze} %0,%3
{ai|addic} %0,%1,%n2\;{aze|addze} %0,%3"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
@@ -12875,27 +13091,29 @@
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (neg:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_short_operand" "r,I"))))]
- "TARGET_32BIT"
+(define_insn "*neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I"))))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0
{sfi|subfic} %0,%1,-1\;{a%I2|add%I2c} %0,%0,%2\;{sfe|subfe} %0,%0,%0"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
- (and:SI (neg:SI
- (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))
- (match_operand:SI 3 "gpc_reg_operand" "r,r")))]
- "TARGET_32BIT"
+(define_insn "*and_neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (and:P (neg:P
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
"@
{sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0
{ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
@@ -12973,84 +13191,6 @@
(define_insn ""
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (const_int 0)))]
- "TARGET_32BIT"
- "{sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{sri|srwi} %0,%0,31"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (const_int 0)))]
- "TARGET_64BIT"
- "subfic %0,%1,0\;addme %0,%0\;srdi %0,%0,63"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC
- (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (const_int 0))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (gt:SI (match_dup 1) (const_int 0)))]
- "TARGET_32BIT"
- "@
- {sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{sri.|srwi.} %0,%0,31
- #"
- [(set_attr "type" "delayed_compare")
- (set_attr "length" "12,16")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (const_int 0))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (gt:SI (match_dup 1) (const_int 0)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (gt:SI (match_dup 1) (const_int 0)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC
- (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (const_int 0))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (gt:DI (match_dup 1) (const_int 0)))]
- "TARGET_64BIT"
- "@
- subfic %0,%1,0\;addme %0,%0\;srdi. %0,%0,63
- #"
- [(set_attr "type" "delayed_compare")
- (set_attr "length" "12,16")])
-
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (const_int 0))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (gt:DI (match_dup 1) (const_int 0)))]
- "TARGET_64BIT && reload_completed"
- [(set (match_dup 0)
- (gt:DI (match_dup 1) (const_int 0)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_short_operand" "r")))]
"TARGET_POWER"
"doz %0,%2,%1\;nabs %0,%0\;{sri|srwi} %0,%0,31"
@@ -13087,23 +13227,15 @@
(const_int 0)))]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
- (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (const_int 0))
- (match_operand:SI 2 "gpc_reg_operand" "r")))]
- "TARGET_32BIT"
+(define_insn "*plus_gt0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gt:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))
+ (match_operand:P 2 "gpc_reg_operand" "r")))]
+ ""
"{a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze|addze} %0,%2"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
- (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (const_int 0))
- (match_operand:DI 2 "gpc_reg_operand" "r")))]
- "TARGET_64BIT"
- "addc %0,%1,%1\;subfe %0,%1,%0\;addze %0,%2"
- [(set_attr "length" "12")])
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
(define_insn ""
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
@@ -13311,280 +13443,79 @@
(define_insn ""
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (const_int 0))))]
- "TARGET_32BIT"
- "{sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{srai|srawi} %0,%0,31"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (neg:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (const_int 0))))]
- "TARGET_64BIT"
- "subfic %0,%1,0\;addme %0,%0\;sradi %0,%0,63"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_short_operand" "r"))))]
"TARGET_POWER"
"doz %0,%2,%1\;nabs %0,%0\;{srai|srawi} %0,%0,31"
[(set_attr "length" "12")])
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI")))]
- "TARGET_32BIT"
- "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;neg %0,%0"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "reg_or_short_operand" "rI")))]
- "TARGET_64BIT"
- "subf%I2c %0,%1,%2\;subfe %0,%0,%0\;neg %0,%0"
- [(set_attr "length" "12")])
-
-(define_insn ""
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
- (compare:CC
- (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (gtu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT"
- "@
- {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,16")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (gtu:SI (match_dup 1) (match_dup 2)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (gtu:SI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
+(define_insn_and_split "*gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
"")
-(define_insn ""
+(define_insn_and_split "*gtu<mode>_compare"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC
- (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "reg_or_short_operand" "rI,rI"))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
- (gtu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT"
- "@
- subf%I2c %0,%1,%2\;subfe %0,%0,%0\;neg. %0,%0
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "12,16")])
-
-(define_split
- [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (gtu:DI (match_dup 1) (match_dup 2)))]
- "TARGET_64BIT && reload_completed"
- [(set (match_dup 0)
- (gtu:DI (match_dup 1) (match_dup 2)))
- (set (match_dup 3)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
- (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "reg_or_short_operand" "I,rI"))
- (match_operand:SI 3 "reg_or_short_operand" "r,rI")))]
- "TARGET_32BIT"
- "@
- {ai|addic} %0,%1,%k2\;{aze|addze} %0,%3
- {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3"
- [(set_attr "length" "8,12")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r")
- (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
- (match_operand:DI 2 "reg_or_short_operand" "I,rI"))
- (match_operand:DI 3 "reg_or_short_operand" "r,rI")))]
- "TARGET_64BIT"
- "@
- addic %0,%1,%k2\;addze %0,%3
- subf%I2c %0,%1,%2\;subfe %0,%0,%0\;subf%I3c %0,%0,%3"
- [(set_attr "length" "8,12")])
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC
- (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_short_operand" "I,r,I,r"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
- (const_int 0)))
- (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
- "TARGET_32BIT"
- "@
- {ai|addic} %4,%1,%k2\;{aze.|addze.} %4,%3
- {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12,12,16")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
- (match_operand:SI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:SI 4 ""))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 4)
- (plus:SI (gtu:SI (match_dup 1) (match_dup 2))
- (match_dup 3)))
- (set (match_dup 0)
- (compare:CC (match_dup 4)
- (const_int 0)))]
- "")
-
-(define_insn ""
- [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC
- (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:DI 2 "reg_or_short_operand" "I,r,I,r"))
- (match_operand:DI 3 "gpc_reg_operand" "r,r,r,r"))
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
(const_int 0)))
- (clobber (match_scratch:DI 4 "=&r,&r,&r,&r"))]
- "TARGET_64BIT"
- "@
- addic %4,%1,%k2\;addze. %4,%3
- subf%I2c %4,%1,%2\;subfe %4,%4,%4\;subfc. %4,%4,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12,12,16")])
-
-(define_split
- [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (match_operand:DI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (clobber (match_scratch:DI 4 ""))]
- "TARGET_64BIT && reload_completed"
- [(set (match_dup 4)
- (plus:DI (gtu:DI (match_dup 1) (match_dup 2))
- (match_dup 3)))
- (set (match_dup 0)
- (compare:CC (match_dup 4)
- (const_int 0)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (gtu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
"")
-(define_insn ""
- [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
- (compare:CC
- (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:SI 2 "reg_or_short_operand" "I,r,I,r"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
- (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT"
- "@
- {ai|addic} %0,%1,%k2\;{aze.|addze.} %0,%3
- {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;{sf.|subfc.} %0,%0,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12,12,16")])
-
-(define_split
- [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
- (match_operand:SI 2 "reg_or_short_operand" ""))
- (match_operand:SI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_32BIT && reload_completed"
- [(set (match_dup 0)
- (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (set (match_dup 4)
- (compare:CC (match_dup 0)
- (const_int 0)))]
+(define_insn_and_split "*plus_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
"")
-(define_insn ""
+(define_insn_and_split "*plus_gtu<mode>_compare"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
(compare:CC
- (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
- (match_operand:DI 2 "reg_or_short_operand" "I,r,I,r"))
- (match_operand:DI 3 "gpc_reg_operand" "r,r,r,r"))
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
(const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
- (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_64BIT"
- "@
- addic %0,%1,%k2\;addze. %0,%3
- subf%I2c %0,%1,%2\;subfe %0,%0,%0\;subfc. %0,%0,%3
- #
- #"
- [(set_attr "type" "compare")
- (set_attr "length" "8,12,12,16")])
-
-(define_split
- [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
- (compare:CC
- (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
- (match_operand:DI 2 "reg_or_short_operand" ""))
- (match_operand:DI 3 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
- "TARGET_64BIT && reload_completed"
- [(set (match_dup 0)
- (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (set (match_dup 4)
- (compare:CC (match_dup 0)
- (const_int 0)))]
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (gtu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
"")
-(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (neg:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
- "TARGET_32BIT"
+(define_insn "*neg_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
"{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0"
- [(set_attr "length" "8")])
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
-(define_insn ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
- (neg:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
- (match_operand:DI 2 "reg_or_short_operand" "rI"))))]
- "TARGET_64BIT"
- "subf%I2c %0,%1,%2\;subfe %0,%0,%0"
- [(set_attr "length" "8")])
;; Define both directions of branch and return. If we need a reload
;; register, we'd rather use CR0 since it is much easier to copy a
@@ -13618,7 +13549,7 @@
{
return output_cbranch (operands[0], NULL, 0, insn);
}"
- [(set_attr "type" "branch")
+ [(set_attr "type" "jmpreg")
(set_attr "length" "4")])
(define_insn ""
@@ -13649,7 +13580,7 @@
{
return output_cbranch (operands[0], NULL, 1, insn);
}"
- [(set_attr "type" "branch")
+ [(set_attr "type" "jmpreg")
(set_attr "length" "4")])
;; Logic on condition register values.
@@ -13731,28 +13662,28 @@
{
int positive_1, positive_2;
- positive_1 = branch_positive_comparison_operator (operands[1], CCEQmode);
- positive_2 = branch_positive_comparison_operator (operands[3], CCEQmode);
+ positive_1 = branch_positive_comparison_operator (operands[1],
+ GET_MODE (operands[1]));
+ positive_2 = branch_positive_comparison_operator (operands[3],
+ GET_MODE (operands[3]));
if (! positive_1)
- operands[1] = gen_rtx (rs6000_reverse_condition (GET_MODE (operands[2]),
- GET_CODE (operands[1])),
- SImode,
- operands[2], const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[2]),
+ GET_CODE (operands[1])),
+ SImode,
+ operands[2], const0_rtx);
else if (GET_MODE (operands[1]) != SImode)
- operands[1] = gen_rtx (GET_CODE (operands[1]),
- SImode,
- operands[2], const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode,
+ operands[2], const0_rtx);
if (! positive_2)
- operands[3] = gen_rtx (rs6000_reverse_condition (GET_MODE (operands[4]),
- GET_CODE (operands[3])),
- SImode,
- operands[4], const0_rtx);
+ operands[3] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[4]),
+ GET_CODE (operands[3])),
+ SImode,
+ operands[4], const0_rtx);
else if (GET_MODE (operands[3]) != SImode)
- operands[3] = gen_rtx (GET_CODE (operands[3]),
- SImode,
- operands[4], const0_rtx);
+ operands[3] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[4], const0_rtx);
if (positive_1 == positive_2)
{
@@ -13781,33 +13712,16 @@
[(set_attr "type" "jmpreg")])
(define_expand "indirect_jump"
- [(set (pc) (match_operand 0 "register_operand" ""))]
- ""
- "
-{
- if (TARGET_32BIT)
- emit_jump_insn (gen_indirect_jumpsi (operands[0]));
- else
- emit_jump_insn (gen_indirect_jumpdi (operands[0]));
- DONE;
-}")
+ [(set (pc) (match_operand 0 "register_operand" ""))])
-(define_insn "indirect_jumpsi"
- [(set (pc) (match_operand:SI 0 "register_operand" "c,*l"))]
- "TARGET_32BIT"
+(define_insn "*indirect_jump<mode>"
+ [(set (pc) (match_operand:P 0 "register_operand" "c,*l"))]
+ ""
"@
bctr
{br|blr}"
[(set_attr "type" "jmpreg")])
-(define_insn "indirect_jumpdi"
- [(set (pc) (match_operand:DI 0 "register_operand" "c,*l"))]
- "TARGET_64BIT"
- "@
- bctr
- blr"
- [(set_attr "type" "jmpreg")])
-
;; Table jump for switch statements:
(define_expand "tablejump"
[(use (match_operand 0 "" ""))
@@ -13836,7 +13750,7 @@
}")
(define_expand "tablejumpdi"
- [(set (match_dup 4)
+ [(set (match_dup 4)
(sign_extend:DI (match_operand:SI 0 "lwa_operand" "rm")))
(set (match_dup 3)
(plus:DI (match_dup 4)
@@ -13850,26 +13764,16 @@
operands[4] = gen_reg_rtx (DImode);
}")
-(define_insn ""
+(define_insn "*tablejump<mode>_internal1"
[(set (pc)
- (match_operand:SI 0 "register_operand" "c,*l"))
+ (match_operand:P 0 "register_operand" "c,*l"))
(use (label_ref (match_operand 1 "" "")))]
- "TARGET_32BIT"
+ ""
"@
bctr
{br|blr}"
[(set_attr "type" "jmpreg")])
-(define_insn ""
- [(set (pc)
- (match_operand:DI 0 "register_operand" "c,*l"))
- (use (label_ref (match_operand 1 "" "")))]
- "TARGET_64BIT"
- "@
- bctr
- blr"
- [(set_attr "type" "jmpreg")])
-
(define_insn "nop"
[(const_int 0)]
""
@@ -13905,32 +13809,18 @@
DONE;
}")
-(define_expand "ctrsi"
+(define_expand "ctr<mode>"
[(parallel [(set (pc)
- (if_then_else (ne (match_operand:SI 0 "register_operand" "")
+ (if_then_else (ne (match_operand:P 0 "register_operand" "")
(const_int 1))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
- (plus:SI (match_dup 0)
+ (plus:P (match_dup 0)
(const_int -1)))
(clobber (match_scratch:CC 2 ""))
- (clobber (match_scratch:SI 3 ""))])]
- "TARGET_32BIT"
- "")
-
-(define_expand "ctrdi"
- [(parallel [(set (pc)
- (if_then_else (ne (match_operand:DI 0 "register_operand" "")
- (const_int 1))
- (label_ref (match_operand 1 "" ""))
- (pc)))
- (set (match_dup 0)
- (plus:DI (match_dup 0)
- (const_int -1)))
- (clobber (match_scratch:CC 2 ""))
- (clobber (match_scratch:DI 3 ""))])]
- "TARGET_64BIT"
+ (clobber (match_scratch:P 3 ""))])]
+ ""
"")
;; We need to be able to do this for any operand, including MEM, or we
@@ -13939,66 +13829,18 @@
;; For the length attribute to be calculated correctly, the
;; label MUST be operand 0.
-(define_insn "*ctrsi_internal1"
- [(set (pc)
- (if_then_else (ne (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
- (const_int 1))
- (label_ref (match_operand 0 "" ""))
- (pc)))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"{bdn|bdnz} %l0\";
- else
- return \"bdz $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrsi_internal2"
- [(set (pc)
- (if_then_else (ne (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
- (const_int 1))
- (pc)
- (label_ref (match_operand 0 "" ""))))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"bdz %l0\";
- else
- return \"{bdn|bdnz} $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrdi_internal1"
+(define_insn "*ctr<mode>_internal1"
[(set (pc)
- (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
(const_int 1))
(label_ref (match_operand 0 "" ""))
(pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT"
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
"*
{
if (which_alternative != 0)
@@ -14011,116 +13853,18 @@
[(set_attr "type" "branch")
(set_attr "length" "*,12,16,16")])
-(define_insn "*ctrdi_internal2"
+(define_insn "*ctr<mode>_internal2"
[(set (pc)
- (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
(const_int 1))
(pc)
(label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"bdz %l0\";
- else
- return \"{bdn|bdnz} $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-;; Similar, but we can use GE since we have a REG_NONNEG.
-
-(define_insn "*ctrsi_internal3"
- [(set (pc)
- (if_then_else (ge (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
- (const_int 0))
- (label_ref (match_operand 0 "" ""))
- (pc)))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT && find_reg_note (insn, REG_NONNEG, 0)"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"{bdn|bdnz} %l0\";
- else
- return \"bdz $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrsi_internal4"
- [(set (pc)
- (if_then_else (ge (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
- (const_int 0))
- (pc)
- (label_ref (match_operand 0 "" ""))))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT && find_reg_note (insn, REG_NONNEG, 0)"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"bdz %l0\";
- else
- return \"{bdn|bdnz} $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrdi_internal3"
- [(set (pc)
- (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
- (const_int 0))
- (label_ref (match_operand 0 "" ""))
- (pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT && find_reg_note (insn, REG_NONNEG, 0)"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"{bdn|bdnz} %l0\";
- else
- return \"bdz $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrdi_internal4"
- [(set (pc)
- (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
- (const_int 0))
- (pc)
- (label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT && find_reg_note (insn, REG_NONNEG, 0)"
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
"*
{
if (which_alternative != 0)
@@ -14135,66 +13879,18 @@
;; Similar but use EQ
-(define_insn "*ctrsi_internal5"
+(define_insn "*ctr<mode>_internal5"
[(set (pc)
- (if_then_else (eq (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
(const_int 1))
(label_ref (match_operand 0 "" ""))
(pc)))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"bdz %l0\";
- else
- return \"{bdn|bdnz} $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrsi_internal6"
- [(set (pc)
- (if_then_else (eq (match_operand:SI 1 "register_operand" "c,*r,*r,*r")
- (const_int 1))
- (pc)
- (label_ref (match_operand 0 "" ""))))
- (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l")
- (plus:SI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:SI 4 "=X,X,&r,r"))]
- "TARGET_32BIT"
- "*
-{
- if (which_alternative != 0)
- return \"#\";
- else if (get_attr_length (insn) == 4)
- return \"{bdn|bdnz} %l0\";
- else
- return \"bdz $+8\;b %l0\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "*,12,16,16")])
-
-(define_insn "*ctrdi_internal5"
- [(set (pc)
- (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
- (const_int 1))
- (label_ref (match_operand 0 "" ""))
- (pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT"
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
"*
{
if (which_alternative != 0)
@@ -14207,18 +13903,18 @@
[(set_attr "type" "branch")
(set_attr "length" "*,12,16,16")])
-(define_insn "*ctrdi_internal6"
+(define_insn "*ctr<mode>_internal6"
[(set (pc)
- (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r,*r")
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
(const_int 1))
(pc)
(label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l")
- (plus:DI (match_dup 1)
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,&r,r"))]
- "TARGET_64BIT"
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
"*
{
if (which_alternative != 0)
@@ -14236,104 +13932,47 @@
(define_split
[(set (pc)
(if_then_else (match_operator 2 "comparison_operator"
- [(match_operand:SI 1 "gpc_reg_operand" "")
+ [(match_operand:P 1 "gpc_reg_operand" "")
(const_int 1)])
(match_operand 5 "" "")
(match_operand 6 "" "")))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (match_dup 1)
- (const_int -1)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
(clobber (match_scratch:CC 3 ""))
- (clobber (match_scratch:SI 4 ""))]
- "TARGET_32BIT && reload_completed"
- [(parallel [(set (match_dup 3)
- (compare:CC (plus:SI (match_dup 1)
- (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (plus:SI (match_dup 1)
- (const_int -1)))])
- (set (pc) (if_then_else (match_dup 7)
- (match_dup 5)
- (match_dup 6)))]
- "
-{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
- const0_rtx); }")
-
-(define_split
- [(set (pc)
- (if_then_else (match_operator 2 "comparison_operator"
- [(match_operand:SI 1 "gpc_reg_operand" "")
- (const_int 1)])
- (match_operand 5 "" "")
- (match_operand 6 "" "")))
- (set (match_operand:SI 0 "nonimmediate_operand" "")
- (plus:SI (match_dup 1) (const_int -1)))
- (clobber (match_scratch:CC 3 ""))
- (clobber (match_scratch:SI 4 ""))]
- "TARGET_32BIT && reload_completed
- && ! gpc_reg_operand (operands[0], SImode)"
- [(parallel [(set (match_dup 3)
- (compare:CC (plus:SI (match_dup 1)
- (const_int -1))
- (const_int 0)))
- (set (match_dup 4)
- (plus:SI (match_dup 1)
- (const_int -1)))])
- (set (match_dup 0)
- (match_dup 4))
- (set (pc) (if_then_else (match_dup 7)
- (match_dup 5)
- (match_dup 6)))]
- "
-{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
- const0_rtx); }")
-(define_split
- [(set (pc)
- (if_then_else (match_operator 2 "comparison_operator"
- [(match_operand:DI 1 "gpc_reg_operand" "")
- (const_int 1)])
- (match_operand 5 "" "")
- (match_operand 6 "" "")))
- (set (match_operand:DI 0 "gpc_reg_operand" "")
- (plus:DI (match_dup 1)
- (const_int -1)))
- (clobber (match_scratch:CC 3 ""))
- (clobber (match_scratch:DI 4 ""))]
- "TARGET_64BIT && reload_completed"
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed"
[(parallel [(set (match_dup 3)
- (compare:CC (plus:DI (match_dup 1)
+ (compare:CC (plus:P (match_dup 1)
(const_int -1))
(const_int 0)))
(set (match_dup 0)
- (plus:DI (match_dup 1)
+ (plus:P (match_dup 1)
(const_int -1)))])
(set (pc) (if_then_else (match_dup 7)
(match_dup 5)
(match_dup 6)))]
"
-{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
- const0_rtx); }")
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
(define_split
[(set (pc)
(if_then_else (match_operator 2 "comparison_operator"
- [(match_operand:DI 1 "gpc_reg_operand" "")
+ [(match_operand:P 1 "gpc_reg_operand" "")
(const_int 1)])
(match_operand 5 "" "")
(match_operand 6 "" "")))
- (set (match_operand:DI 0 "nonimmediate_operand" "")
- (plus:DI (match_dup 1) (const_int -1)))
+ (set (match_operand:P 0 "nonimmediate_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
(clobber (match_scratch:CC 3 ""))
- (clobber (match_scratch:DI 4 ""))]
- "TARGET_64BIT && reload_completed
- && ! gpc_reg_operand (operands[0], DImode)"
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed && ! gpc_reg_operand (operands[0], SImode)"
[(parallel [(set (match_dup 3)
- (compare:CC (plus:DI (match_dup 1)
+ (compare:CC (plus:P (match_dup 1)
(const_int -1))
(const_int 0)))
(set (match_dup 4)
- (plus:DI (match_dup 1)
+ (plus:P (match_dup 1)
(const_int -1)))])
(set (match_dup 0)
(match_dup 4))
@@ -14341,8 +13980,8 @@
(match_dup 5)
(match_dup 6)))]
"
-{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
- const0_rtx); }")
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
(define_insn "trap"
[(trap_if (const_int 1) (const_int 0))]
@@ -14360,19 +13999,11 @@
(define_insn ""
[(trap_if (match_operator 0 "trap_comparison_operator"
- [(match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "reg_or_short_operand" "rI")])
+ [(match_operand:GPR 1 "register_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")])
(const_int 0))]
""
- "{t|tw}%V0%I2 %1,%2")
-
-(define_insn ""
- [(trap_if (match_operator 0 "trap_comparison_operator"
- [(match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "reg_or_short_operand" "rI")])
- (const_int 0))]
- "TARGET_POWERPC64"
- "td%V0%I2 %1,%2")
+ "{t|t<wd>}%V0%I2 %1,%2")
;; Insns related to generating the function prologue and epilogue.
@@ -14408,7 +14039,7 @@
(define_insn "movesi_from_cr"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
+ (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
(reg:CC 72) (reg:CC 73) (reg:CC 74) (reg:CC 75)]
UNSPEC_MOVESI_FROM_CR))]
""
@@ -14420,26 +14051,16 @@
[(set (match_operand:SI 1 "memory_operand" "=m")
(match_operand:SI 2 "gpc_reg_operand" "r"))])]
"TARGET_MULTIPLE"
- "{stm|stmw} %2,%1")
-
-(define_insn "*save_fpregs_si"
- [(match_parallel 0 "any_operand"
- [(clobber (match_operand:SI 1 "register_operand" "=l"))
- (use (match_operand:SI 2 "call_operand" "s"))
- (set (match_operand:DF 3 "memory_operand" "=m")
- (match_operand:DF 4 "gpc_reg_operand" "f"))])]
- "TARGET_32BIT"
- "bl %z2"
- [(set_attr "type" "branch")
- (set_attr "length" "4")])
+ "{stm|stmw} %2,%1"
+ [(set_attr "type" "store_ux")])
-(define_insn "*save_fpregs_di"
- [(match_parallel 0 "any_operand"
- [(clobber (match_operand:DI 1 "register_operand" "=l"))
- (use (match_operand:DI 2 "call_operand" "s"))
+(define_insn "*save_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:P 1 "register_operand" "=l"))
+ (use (match_operand:P 2 "call_operand" "s"))
(set (match_operand:DF 3 "memory_operand" "=m")
(match_operand:DF 4 "gpc_reg_operand" "f"))])]
- "TARGET_64BIT"
+ ""
"bl %z2"
[(set_attr "type" "branch")
(set_attr "length" "4")])
@@ -14497,7 +14118,7 @@
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand 2 "immediate_operand" "n")]
UNSPEC_MOVESI_TO_CR))]
- "GET_CODE (operands[0]) == REG
+ "GET_CODE (operands[0]) == REG
&& CR_REGNO_P (REGNO (operands[0]))
&& GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))"
@@ -14506,50 +14127,34 @@
; The load-multiple instructions have similar properties.
; Note that "load_multiple" is a name known to the machine-independent
-; code that actually corresponds to the powerpc load-string.
+; code that actually corresponds to the PowerPC load-string.
(define_insn "*lmw"
[(match_parallel 0 "lmw_operation"
[(set (match_operand:SI 1 "gpc_reg_operand" "=r")
(match_operand:SI 2 "memory_operand" "m"))])]
"TARGET_MULTIPLE"
- "{lm|lmw} %1,%2")
-
-(define_insn "*return_internal_si"
- [(return)
- (use (match_operand:SI 0 "register_operand" "lc"))]
- "TARGET_32BIT"
- "b%T0"
- [(set_attr "type" "jmpreg")])
+ "{lm|lmw} %1,%2"
+ [(set_attr "type" "load_ux")])
-(define_insn "*return_internal_di"
+(define_insn "*return_internal_<mode>"
[(return)
- (use (match_operand:DI 0 "register_operand" "lc"))]
- "TARGET_64BIT"
+ (use (match_operand:P 0 "register_operand" "lc"))]
+ ""
"b%T0"
[(set_attr "type" "jmpreg")])
; FIXME: This would probably be somewhat simpler if the Cygnus sibcall
-; stuff was in GCC. Oh, and "any_operand" is a bit flexible...
-
-(define_insn "*return_and_restore_fpregs_si"
- [(match_parallel 0 "any_operand"
- [(return)
- (use (match_operand:SI 1 "register_operand" "l"))
- (use (match_operand:SI 2 "call_operand" "s"))
- (set (match_operand:DF 3 "gpc_reg_operand" "=f")
- (match_operand:DF 4 "memory_operand" "m"))])]
- "TARGET_32BIT"
- "b %z2")
+; stuff was in GCC. Oh, and "any_parallel_operand" is a bit flexible...
-(define_insn "*return_and_restore_fpregs_di"
- [(match_parallel 0 "any_operand"
+(define_insn "*return_and_restore_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
[(return)
- (use (match_operand:DI 1 "register_operand" "l"))
- (use (match_operand:DI 2 "call_operand" "s"))
+ (use (match_operand:P 1 "register_operand" "l"))
+ (use (match_operand:P 2 "call_operand" "s"))
(set (match_operand:DF 3 "gpc_reg_operand" "=f")
(match_operand:DF 4 "memory_operand" "m"))])]
- "TARGET_64BIT"
+ ""
"b %z2")
; This is used in compiling the unwind routines.
@@ -14566,18 +14171,11 @@
}")
; We can't expand this before we know where the link register is stored.
-(define_insn "eh_set_lr_si"
- [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
+(define_insn "eh_set_lr_<mode>"
+ [(unspec_volatile [(match_operand:P 0 "register_operand" "r")]
UNSPECV_EH_RR)
- (clobber (match_scratch:SI 1 "=&b"))]
- "TARGET_32BIT"
- "#")
-
-(define_insn "eh_set_lr_di"
- [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
- UNSPECV_EH_RR)
- (clobber (match_scratch:DI 1 "=&b"))]
- "TARGET_64BIT"
+ (clobber (match_scratch:P 1 "=&b"))]
+ ""
"#")
(define_split
@@ -14592,7 +14190,7 @@
}")
(define_insn "prefetch"
- [(prefetch (match_operand:V4SI 0 "address_operand" "p")
+ [(prefetch (match_operand 0 "indexed_or_indirect_address" "a")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))]
"TARGET_POWERPC"
@@ -14603,6 +14201,8 @@
return INTVAL (operands[1]) ? \"dcbtst %a0\" : \"dcbt %a0\";
}"
[(set_attr "type" "load")])
+
+(include "sync.md")
(include "altivec.md")
(include "spe.md")
diff --git a/contrib/gcc/config/rs6000/rs6000.opt b/contrib/gcc/config/rs6000/rs6000.opt
new file mode 100644
index 0000000..8ff390d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rs6000.opt
@@ -0,0 +1,247 @@
+; Options for the rs6000 port of the compiler
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mpower
+Target Report RejectNegative Mask(POWER)
+Use POWER instruction set
+
+mno-power
+Target Report RejectNegative
+Do not use POWER instruction set
+
+mpower2
+Target Report Mask(POWER2)
+Use POWER2 instruction set
+
+mpowerpc
+Target Report RejectNegative Mask(POWERPC)
+Use PowerPC instruction set
+
+mno-powerpc
+Target Report RejectNegative
+Do not use PowerPC instruction set
+
+mpowerpc64
+Target Report Mask(POWERPC64)
+Use PowerPC-64 instruction set
+
+mpowerpc-gpopt
+Target Report Mask(PPC_GPOPT)
+Use PowerPC General Purpose group optional instructions
+
+mpowerpc-gfxopt
+Target Report Mask(PPC_GFXOPT)
+Use PowerPC Graphics group optional instructions
+
+mmfcrf
+Target Report Mask(MFCRF)
+Use PowerPC V2.01 single field mfcr instruction
+
+mpopcntb
+Target Report Mask(POPCNTB)
+Use PowerPC V2.02 popcntb instruction
+
+mfprnd
+Target Report Mask(FPRND)
+Use PowerPC V2.02 floating point rounding instructions
+
+maltivec
+Target Report Mask(ALTIVEC)
+Use AltiVec instructions
+
+mmulhw
+Target Report Mask(MULHW)
+Use 4xx half-word multiply instructions
+
+mdlmzb
+Target Report Mask(DLMZB)
+Use 4xx string-search dlmzb instruction
+
+mmultiple
+Target Report Mask(MULTIPLE)
+Generate load/store multiple instructions
+
+mstring
+Target Report Mask(STRING)
+Generate string instructions for block moves
+
+mnew-mnemonics
+Target Report RejectNegative Mask(NEW_MNEMONICS)
+Use new mnemonics for PowerPC architecture
+
+mold-mnemonics
+Target Report RejectNegative InverseMask(NEW_MNEMONICS)
+Use old mnemonics for PowerPC architecture
+
+msoft-float
+Target Report RejectNegative Mask(SOFT_FLOAT)
+Do not use hardware floating point
+
+mhard-float
+Target Report RejectNegative InverseMask(SOFT_FLOAT, HARD_FLOAT)
+Use hardware floating point
+
+mno-update
+Target Report RejectNegative Mask(NO_UPDATE)
+Do not generate load/store with update instructions
+
+mupdate
+Target Report RejectNegative InverseMask(NO_UPDATE, UPDATE)
+Generate load/store with update instructions
+
+mno-fused-madd
+Target Report RejectNegative Mask(NO_FUSED_MADD)
+Do not generate fused multiply/add instructions
+
+mfused-madd
+Target Report RejectNegative InverseMask(NO_FUSED_MADD, FUSED_MADD)
+Generate fused multiply/add instructions
+
+msched-prolog
+Target Report Var(TARGET_SCHED_PROLOG) Init(1)
+Schedule the start and end of the procedure
+
+msched-epilog
+Target Undocumented Var(TARGET_SCHED_PROLOG) VarExists
+
+maix-struct-return
+Target Report RejectNegative Var(aix_struct_return)
+Return all structures in memory (AIX default)
+
+msvr4-struct-return
+Target Report RejectNegative Var(aix_struct_return,0) VarExists
+Return small structures in registers (SVR4 default)
+
+mxl-compat
+Target Report Var(TARGET_XL_COMPAT)
+Conform more closely to IBM XLC semantics
+
+mswdiv
+Target Report Var(swdiv)
+Generate software floating point divide for better throughput
+
+mno-fp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC)
+Do not place floating point constants in TOC
+
+mfp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC,0)
+Place floating point constants in TOC
+
+mno-sum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC)
+Do not place symbol+offset constants in TOC
+
+msum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC,0) VarExists
+Place symbol+offset constants in TOC
+
+; Output only one TOC entry per module. Normally linking fails if
+; there are more than 16K unique variables/constants in an executable. With
+; this option, linking fails only if there are more than 16K modules, or
+; if there are more than 16K unique variables/constants in a single module.

+;
+; This is at the cost of having 2 extra loads and one extra store per
+; function, and one less allocable register.
+mminimal-toc
+Target Report Mask(MINIMAL_TOC)
+Use only one TOC entry per procedure
+
+mfull-toc
+Target Report
+Put everything in the regular TOC
+
+mvrsave
+Target Report Var(TARGET_ALTIVEC_VRSAVE)
+Generate VRSAVE instructions when generating AltiVec code
+
+mvrsave=
+Target RejectNegative Joined
+-mvrsave=yes/no Deprecated option. Use -mvrsave/-mno-vrsave instead
+
+misel
+Target Var(rs6000_isel)
+Generate isel instructions
+
+misel=
+Target RejectNegative Joined
+-misel=yes/no Deprecated option. Use -misel/-mno-isel instead
+
+mspe
+Target Var(rs6000_spe)
+Generate SPE SIMD instructions on E500
+
+mspe=
+Target RejectNegative Joined
+-mspe=yes/no Deprecated option. Use -mspe/-mno-spe instead
+
+mdebug=
+Target RejectNegative Joined
+-mdebug= Enable debug output
+
+mabi=
+Target RejectNegative Joined
+-mabi= Specify ABI to use
+
+mcpu=
+Target RejectNegative Joined
+-mcpu= Use features of and schedule code for given CPU
+
+mtune=
+Target RejectNegative Joined
+-mtune= Schedule code for given CPU
+
+mtraceback=
+Target RejectNegative Joined
+-mtraceback= Select full, part, or no traceback table
+
+mlongcall
+Target Report Var(rs6000_default_long_calls)
+Avoid all range limits on call instructions
+
+mwarn-altivec-long
+Target Var(rs6000_warn_altivec_long) Init(1)
+Warn about deprecated 'vector long ...' AltiVec type usage
+
+mfloat-gprs=
+Target RejectNegative Joined
+-mfloat-gprs= Select GPR floating point method
+
+mlong-double-
+Target RejectNegative Joined UInteger
+-mlong-double-<n> Specify size of long double (64 or 128 bits)
+
+msched-costly-dep=
+Target RejectNegative Joined
+Determine which dependences between insns are considered costly
+
+minsert-sched-nops=
+Target RejectNegative Joined
+Specify which post scheduling nop insertion scheme to apply
+
+malign-
+Target RejectNegative Joined
+Specify alignment of structure fields default/natural
+
+mprioritize-restricted-insns=
+Target RejectNegative Joined UInteger Var(rs6000_sched_restricted_insns_priority)
+Specify scheduling priority for dispatch slot restricted insns
diff --git a/contrib/gcc/config/rs6000/rs64.md b/contrib/gcc/config/rs6000/rs64.md
index 4d99875..71ec61d 100644
--- a/contrib/gcc/config/rs6000/rs64.md
+++ b/contrib/gcc/config/rs6000/rs64.md
@@ -1,5 +1,5 @@
;; Scheduling description for IBM RS64 processors.
-;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
@@ -15,8 +15,8 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_automaton "rs64,rs64fp")
(define_cpu_unit "iu_rs64" "rs64")
@@ -31,7 +31,7 @@
(eq_attr "cpu" "rs64a"))
"lsu_rs64")
-(define_insn_reservation "rs64a-store" 1
+(define_insn_reservation "rs64a-store" 2
(and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
(eq_attr "cpu" "rs64a"))
"lsu_rs64")
@@ -41,11 +41,26 @@
(eq_attr "cpu" "rs64a"))
"lsu_rs64")
+(define_insn_reservation "rs64a-llsc" 2
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
(define_insn_reservation "rs64a-integer" 1
(and (eq_attr "type" "integer,insert_word")
(eq_attr "cpu" "rs64a"))
"iu_rs64")
+(define_insn_reservation "rs64a-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64")
+
+(define_insn_reservation "rs64a-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64,iu_rs64")
+
(define_insn_reservation "rs64a-imul" 20
(and (eq_attr "type" "imul,imul_compare")
(eq_attr "cpu" "rs64a"))
@@ -126,3 +141,13 @@
(eq_attr "cpu" "rs64a"))
"bpu_rs64")
+(define_insn_reservation "rs64a-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "rs64a"))
+ "bpu_rs64")
+
+(define_insn_reservation "rs64a-sync" 1
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
diff --git a/contrib/gcc/config/rs6000/rtems.h b/contrib/gcc/config/rs6000/rtems.h
index d83e1eb..7c49186 100644
--- a/contrib/gcc/config/rs6000/rtems.h
+++ b/contrib/gcc/config/rs6000/rtems.h
@@ -1,5 +1,5 @@
/* Definitions for rtems targeting a PowerPC using elf.
- Copyright (C) 1996, 1997, 2000, 2001, 2002, 2003, 2005
+ Copyright (C) 1996, 1997, 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Specify predefined symbols in preprocessor. */
diff --git a/contrib/gcc/config/rs6000/secureplt.h b/contrib/gcc/config/rs6000/secureplt.h
new file mode 100644
index 0000000..a02af8d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/secureplt.h
@@ -0,0 +1,21 @@
+/* Default to -msecure-plt.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define CC1_SECURE_PLT_DEFAULT_SPEC "-msecure-plt"
diff --git a/contrib/gcc/config/rs6000/sfp-machine.h b/contrib/gcc/config/rs6000/sfp-machine.h
new file mode 100644
index 0000000..29173e2
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sfp-machine.h
@@ -0,0 +1,63 @@
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* Someone please check this. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
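
[Editor's note — illustration, not part of the patch. A plain-C restatement of the _FP_CHOOSENAN policy above (my reading; "qnan_bit" is a stand-in for _FP_QNANBIT_##fs): if X is a quiet NaN and Y is not, Y's sign and fraction win; otherwise X's do. The macro always marks the result as a NaN.]

    #include <stdint.h>

    static uint32_t choose_nan_frac(uint32_t x_frac, uint32_t y_frac,
                                    uint32_t qnan_bit)
    {
        /* X quiet, Y signaling: propagate Y's payload.  */
        if ((x_frac & qnan_bit) && !(y_frac & qnan_bit))
            return y_frac;
        return x_frac;   /* otherwise the first operand wins */
    }
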
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
+# if defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
+# error "Both BIG_ENDIAN and LITTLE_ENDIAN defined!"
+# endif
+# define __BYTE_ORDER __BIG_ENDIAN
+#else
+# if defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# else
+# error "Cannot determine current byte order"
+# endif
+#endif
+
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
diff --git a/contrib/gcc/config/rs6000/sol-ci.asm b/contrib/gcc/config/rs6000/sol-ci.asm
index cc97b0a..35e3b8e 100644
--- a/contrib/gcc/config/rs6000/sol-ci.asm
+++ b/contrib/gcc/config/rs6000/sol-ci.asm
@@ -23,8 +23,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
-# the Free Software Foundation, 59 Temple Place - Suite 330,
-# Boston, MA 02111-1307, USA.
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
#
# As a special exception, if you link this library with files
# compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/sol-cn.asm b/contrib/gcc/config/rs6000/sol-cn.asm
index 673540f..8546834 100644
--- a/contrib/gcc/config/rs6000/sol-cn.asm
+++ b/contrib/gcc/config/rs6000/sol-cn.asm
@@ -23,8 +23,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
-# the Free Software Foundation, 59 Temple Place - Suite 330,
-# Boston, MA 02111-1307, USA.
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
#
# As a special exception, if you link this library with files
# compiled with GCC to produce an executable, this does not cause
diff --git a/contrib/gcc/config/rs6000/spe.h b/contrib/gcc/config/rs6000/spe.h
index 878fc72..4e2d7f6 100644
--- a/contrib/gcc/config/rs6000/spe.h
+++ b/contrib/gcc/config/rs6000/spe.h
@@ -1,5 +1,5 @@
/* PowerPC E500 user include file.
- Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Aldy Hernandez (aldyh@redhat.com).
This file is part of GCC.
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* As a special exception, if you include this header file into source
files compiled by GCC, this header file does not by itself cause
diff --git a/contrib/gcc/config/rs6000/spe.md b/contrib/gcc/config/rs6000/spe.md
index 306accd..7d05e08 100644
--- a/contrib/gcc/config/rs6000/spe.md
+++ b/contrib/gcc/config/rs6000/spe.md
@@ -1,5 +1,5 @@
;; e500 SPE description
-;; Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+;; Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
;; This file is part of GCC.
@@ -16,12 +16,24 @@
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING. If not, write to the
-;; Free Software Foundation, 59 Temple Place - Suite 330, Boston,
-;; MA 02111-1307, USA.
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
(define_constants
[(SPE_ACC_REGNO 111)
- (SPEFSCR_REGNO 112)])
+ (SPEFSCR_REGNO 112)
+
+ (CMPDFEQ_GPR 1006)
+ (TSTDFEQ_GPR 1007)
+ (CMPDFGT_GPR 1008)
+ (TSTDFGT_GPR 1009)
+ (CMPDFLT_GPR 1010)
+ (TSTDFLT_GPR 1011)
+ (E500_CR_IOR_COMPARE 1012)
+ ])
+
+;; Modes using a 64-bit register.
+(define_mode_macro SPE64 [DF V4HI V2SF V1DI V2SI])
(define_insn "*negsf2_gpr"
[(set (match_operand:SF 0 "gpc_reg_operand" "=r")
@@ -76,6 +88,22 @@
"efsdiv %0,%1,%2"
[(set_attr "type" "vecfdiv")])
+;; Floating point conversion instructions.
+
+(define_insn "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctuiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfs %0,%1"
+ [(set_attr "type" "fp")])
+
(define_insn "spe_fixuns_truncsfsi2"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unsigned_fix:SI (match_operand:SF 1 "gpc_reg_operand" "r")))]
@@ -90,6 +118,13 @@
"efsctsiz %0,%1"
[(set_attr "type" "fp")])
+(define_insn "spe_fix_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctsiz %0,%1"
+ [(set_attr "type" "fp")])
+
(define_insn "spe_floatunssisf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "=r")
(unsigned_float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
@@ -97,6 +132,13 @@
"efscfui %0,%1"
[(set_attr "type" "fp")])
+(define_insn "spe_floatunssidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfui %0,%1"
+ [(set_attr "type" "fp")])
+
(define_insn "spe_floatsisf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "=r")
(float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
@@ -104,6 +146,12 @@
"efscfsi %0,%1"
[(set_attr "type" "fp")])
+(define_insn "spe_floatsidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfsi %0,%1"
+ [(set_attr "type" "fp")])
;; SPE SIMD instructions
@@ -2147,6 +2195,165 @@
[(set_attr "type" "vecstore")
(set_attr "length" "4")])
+;; Double-precision floating point instructions.
+
+;; FIXME: Add o=r option.
+(define_insn "*frob_df_di"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r")
+ (subreg:DF (match_operand:DI 1 "input_operand" "r,m") 0))]
+ "TARGET_E500_DOUBLE"
+ "@
+ evmergelo %0,%1,%L1
+ evldd%X1 %0,%y1")
+
+(define_insn "*frob_di_df"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=&r")
+ (subreg:DI (match_operand:DF 1 "input_operand" "r") 0))]
+ "TARGET_E500_DOUBLE"
+ "evmergehi %0,%1,%1\;mr %L0,%1"
+ [(set_attr "length" "8")])
+
+(define_insn "*frob_di_df_2"
+ [(set (subreg:DF (match_operand:DI 0 "register_operand" "=&r,r") 0)
+ (match_operand:DF 1 "input_operand" "r,m"))]
+ "TARGET_E500_DOUBLE"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ return \"evmergehi %0,%1,%1\;mr %L0,%1\";
+ case 1:
+ /* If the address is not offsettable we need to load the whole
+ doubleword into a 64-bit register and then copy the high word
+ to form the correct output layout. */
+ if (!offsettable_nonstrict_memref_p (operands[1]))
+ return \"evldd%X1 %L0,%y1\;evmergehi %0,%L0,%L0\";
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+}"
+ [(set_attr "length" "8,8")])
+
+(define_insn "*mov_si<mode>_e500_subreg0"
+ [(set (subreg:SI (match_operand:SPE64 0 "register_operand" "+r,&r") 0)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ evmergelo %0,%1,%0
+ evmergelohi %0,%0,%0\;{l%U1%X1|lwz%U1%X1} %0,%1\;evmergelohi %0,%0,%0")
+
+;; ??? Could use evstwwe for memory stores in some cases, depending on
+;; the offset.
+(define_insn "*mov_si<mode>_e500_subreg0_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64 1 "register_operand" "+r,&r") 0))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ evmergehi %0,%0,%1
+ evmergelohi %1,%1,%1\;{st%U0%X0|stw%U0%X0} %1,%0")
+
+(define_insn "*mov_si<mode>_e500_subreg4"
+ [(set (subreg:SI (match_operand:SPE64 0 "register_operand" "+r,r") 4)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1")
+
+(define_insn "*mov_si<mode>_e500_subreg4_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64 1 "register_operand" "r,r") 4))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ mr %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0")
+
+;; FIXME: Allow r=CONST0.
+(define_insn "*movdf_e500_double"
+ [(set (match_operand:DF 0 "rs6000_nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "r,m,r"))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"evor %0,%1,%1\";
+ case 1:
+ return \"evldd%X1 %0,%y1\";
+ case 2:
+ return \"evstdd%X0 %1,%y0\";
+ default:
+ gcc_unreachable ();
+ }
+ }"
+ [(set_attr "type" "*,vecload,vecstore")
+ (set_attr "length" "*,*,*")])
+
+(define_insn "spe_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efscfd %0,%1")
+
+(define_insn "spe_absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdabs %0,%1")
+
+(define_insn "spe_nabsdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r"))))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdnabs %0,%1")
+
+(define_insn "spe_negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdneg %0,%1")
+
+(define_insn "spe_adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdadd %0,%1,%2")
+
+(define_insn "spe_subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdsub %0,%1,%2")
+
+(define_insn "spe_muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdmul %0,%1,%2")
+
+(define_insn "spe_divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efddiv %0,%1,%2")
+
;; Vector move instructions.
(define_expand "movv2si"
@@ -2169,7 +2376,7 @@
case 1: return \"evldd%X1 %0,%y1\";
case 2: return \"evor %0,%1,%1\";
case 3: return output_vec_const_move (operands);
- default: abort ();
+ default: gcc_unreachable ();
}
}"
[(set_attr "type" "vecload,vecstore,*,*")
@@ -2210,15 +2417,16 @@
"{ rs6000_emit_move (operands[0], operands[1], V4HImode); DONE; }")
(define_insn "*movv4hi_internal"
- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=m,r,r")
- (match_operand:V4HI 1 "input_operand" "r,m,r"))]
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V4HI 1 "input_operand" "r,m,r,W"))]
"TARGET_SPE
&& (gpc_reg_operand (operands[0], V4HImode)
|| gpc_reg_operand (operands[1], V4HImode))"
"@
evstdd%X0 %1,%y0
evldd%X1 %0,%y1
- evor %0,%1,%1"
+ evor %0,%1,%1
+ evxor %0,%0,%0"
[(set_attr "type" "vecload")])
(define_expand "movv2sf"
@@ -2458,14 +2666,14 @@
;; FP comparison stuff.
;; Flip the GT bit.
-(define_insn "e500_flip_eq_bit"
+(define_insn "e500_flip_gt_bit"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
+ (unspec:CCFP
+ [(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
"!TARGET_FPRS && TARGET_HARD_FLOAT"
"*
{
- return output_e500_flip_eq_bit (operands[0], operands[1]);
+ return output_e500_flip_gt_bit (operands[0], operands[1]);
}"
[(set_attr "type" "cr_logical")])
@@ -2475,60 +2683,132 @@
(define_insn "cmpsfeq_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1000))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1000))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
"efscmpeq %0,%1,%2"
[(set_attr "type" "veccmp")])
(define_insn "tstsfeq_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1001))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1001))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
"efststeq %0,%1,%2"
[(set_attr "type" "veccmpsimple")])
(define_insn "cmpsfgt_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1002))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1002))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
"efscmpgt %0,%1,%2"
[(set_attr "type" "veccmp")])
(define_insn "tstsfgt_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1003))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1003))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
"efststgt %0,%1,%2"
[(set_attr "type" "veccmpsimple")])
(define_insn "cmpsflt_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1004))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1004))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
"efscmplt %0,%1,%2"
[(set_attr "type" "veccmp")])
(define_insn "tstsflt_gpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
- (unspec:CCFP
- [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
- (match_operand:SF 2 "gpc_reg_operand" "r"))]
- 1005))]
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1005))]
"TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
"efststlt %0,%1,%2"
[(set_attr "type" "veccmpsimple")])
+
+;; Same thing, but for double-precision.
+
+(define_insn "cmpdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtsteq %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtstgt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtstlt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+;; Like cceq_ior_compare, but compare the GT bits.
+(define_insn "e500_cr_ior_compare"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP [(match_operand 1 "cc_reg_operand" "y")
+ (match_operand 2 "cc_reg_operand" "y")]
+ E500_CR_IOR_COMPARE))]
+ "TARGET_E500"
+ "cror 4*%0+gt,4*%1+gt,4*%2+gt"
+ [(set_attr "type" "cr_logical")])
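
The cmp*/tst* pairs above pick between the e500's two comparison flavours: the efscmp*/efdcmp* forms signal floating-point exceptions and serve IEEE-conformant code, while the efstst*/efdtst* forms are exception-free and are only selected under flag_unsafe_math_optimizations; e500_cr_ior_compare then ORs the GT bits of two such results. A minimal sketch of how the single-precision patterns are reached (the -mcpu value and flags are illustrative for an e500/SPE toolchain):

    /* gt.c -- with a powerpc eabispe compiler:
         -mcpu=8540 -O2              -> efscmpgt  (cmpsfgt_gpr)
         -mcpu=8540 -O2 -ffast-math  -> efststgt  (tstsfgt_gpr)
       (-ffast-math implies -funsafe-math-optimizations).  */
    int gt (float a, float b)
    {
      return a > b;   /* SF compare held in GPRs on e500 */
    }
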
diff --git a/contrib/gcc/config/rs6000/sync.md b/contrib/gcc/config/rs6000/sync.md
new file mode 100644
index 0000000..b716647
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sync.md
@@ -0,0 +1,625 @@
+;; Machine description for PowerPC synchronization instructions.
+;; Copyright (C) 2005 Free Software Foundation, Inc.
+;; Contributed by Geoffrey Keating.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_mode_attr larx [(SI "lwarx") (DI "ldarx")])
+(define_mode_attr stcx [(SI "stwcx.") (DI "stdcx.")])
+
+(define_code_macro FETCHOP [plus minus ior xor and])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_code_attr fetchop_pred
+ [(plus "add_operand") (minus "gpc_reg_operand")
+ (ior "logical_operand") (xor "logical_operand") (and "and_operand")])
+(define_code_attr fetchopsi_constr
+ [(plus "rIL") (minus "r") (ior "rKL") (xor "rKL") (and "rTKL")])
+(define_code_attr fetchopdi_constr
+ [(plus "rIL") (minus "r") (ior "rKJF") (xor "rKJF") (and "rSTKJ")])
+
+(define_expand "memory_barrier"
+ [(set (mem:BLK (match_dup 0))
+ (unspec:BLK [(mem:BLK (match_dup 0))] UNSPEC_SYNC))]
+ ""
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*sync_internal"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_SYNC))]
+ ""
+ "{dcs|sync}"
+ [(set_attr "type" "sync")])
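
memory_barrier is the named expander GCC uses for a full memory fence, i.e. the __sync_synchronize builtin; it matches *sync_internal above, which emits the sync instruction ({dcs|sync} selects the old POWER or PowerPC mnemonic). A usage sketch:

    extern int data;
    extern volatile int ready;

    void publish (int v)
    {
      data = v;
      __sync_synchronize ();   /* expands through memory_barrier -> sync */
      ready = 1;
    }
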
+
+(define_insn "load_locked_<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec_volatile:GPR
+ [(match_operand:GPR 1 "memory_operand" "Z")] UNSPECV_LL))]
+ "TARGET_POWERPC"
+ "<larx> %0,%y1"
+ [(set_attr "type" "load_l")])
+
+(define_insn "store_conditional_<mode>"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_SC))
+ (set (match_operand:GPR 1 "memory_operand" "=Z")
+ (match_operand:GPR 2 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC"
+ "<stcx> %2,%y1"
+ [(set_attr "type" "store_c")])
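
load_locked_<mode> and store_conditional_<mode> wrap the PowerPC reservation pair: lwarx/ldarx loads a value and sets a reservation, and stwcx./stdcx. stores only if the reservation is still intact, recording success in cr0. The atomic splitters below chain them into retry loops; written out by hand, the generated shape is roughly this (a sketch, not the splitters' exact output):

    /* Atomic 32-bit fetch-and-add via an explicit lwarx/stwcx. loop.  */
    static inline int
    fetch_add (int *p, int v)
    {
      int old, tmp;
      __asm__ __volatile__ (
        "1: lwarx  %0,0,%3\n\t"    /* load word and reserve       */
        "   add    %1,%0,%4\n\t"
        "   stwcx. %1,0,%3\n\t"    /* store iff reservation held  */
        "   bne-   1b"             /* lost the reservation: retry */
        : "=&r" (old), "=&r" (tmp), "+m" (*p)
        : "r" (p), "r" (v)
        : "cc", "memory");
      return old;
    }
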
+
+(define_insn_and_split "sync_compare_and_swap<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rI")
+ (match_operand:GPR 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:GPR 4 "=&r"))
+ (clobber (match_scratch:CC 5 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swap (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+})
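
sync_compare_and_swap<mode> is the named pattern behind GCC's __sync_*_compare_and_swap builtins; rs6000_split_compare_and_swap expands it after reload into a lwarx, compare-with-expected, conditional stwcx. loop. A usage sketch:

    /* Lock-free increment built on compare-and-swap.  */
    void
    increment (volatile long *p)
    {
      long old, seen;
      do
        {
          old = *p;
          seen = __sync_val_compare_and_swap (p, old, old + 1);
        }
      while (seen != old);   /* another thread won the race: retry */
    }
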
+
+(define_expand "sync_compare_and_swaphi"
+ [(match_operand:HI 0 "gpc_reg_operand" "")
+ (match_operand:HI 1 "memory_operand" "")
+ (match_operand:HI 2 "gpc_reg_operand" "")
+ (match_operand:HI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_compare_and_swapqi"
+ [(match_operand:QI 0 "gpc_reg_operand" "")
+ (match_operand:QI 1 "memory_operand" "")
+ (match_operand:QI 2 "gpc_reg_operand" "")
+ (match_operand:QI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "sync_compare_and_swapqhi_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 4 "memory_operand" "+Z"))
+ (set (match_dup 4)
+ (unspec:SI
+ [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:SI 5 "=&r"))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3], operands[4],
+ operands[5]);
+ DONE;
+})
+
+(define_insn_and_split "sync_lock_test_and_set<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rL")]
+ UNSPEC_XCHG))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_lock_test_and_set (operands[0], operands[1], operands[2],
+ operands[3]);
+ DONE;
+})
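
sync_lock_test_and_set<mode> implements the __sync_lock_test_and_set builtin, an atomic exchange with acquire semantics only rather than a full barrier; sync_lock_release further down pairs it with an lwsync followed by a plain store. Together they suffice for a simple spinlock; a sketch:

    static volatile int lock;

    void
    with_lock (void)
    {
      while (__sync_lock_test_and_set (&lock, 1))   /* acquire: atomic exchange   */
        ;                                           /* spin while old value was 1 */
      /* ... critical section ... */
      __sync_lock_release (&lock);                  /* release: lwsync, store 0   */
    }
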
+
+(define_expand "sync_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 0)
+ (match_operand:INT1 1 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "memory_operand" "+Z")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "memory_operand" "+Z")
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 0)
+ (match_operand:DI 1 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 0))
+ (match_operand:INT1 1 "gpc_reg_operand" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[0]),
+ operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "memory_operand" "+Z")
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 0))
+ (match_operand:GPR 1 "gpc_reg_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 2 "=&r"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_old_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1)
+ (match_operand:INT1 2 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_old_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (match_operand:DI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1)
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_old_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 1))
+ (match_operand:INT1 2 "gpc_reg_operand" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[1]),
+ operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 1))
+ (match_operand:GPR 2 "gpc_reg_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (FETCHOP:INT1
+ (match_operand:INT1 1 "memory_operand" "")
+ (match_operand:INT1 2 "<fetchop_pred>" "")))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:SI
+ (match_operand:SI 1 "memory_operand" "+Z")
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>")))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_new_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:DI
+ (match_operand:DI 1 "memory_operand" "+Z")
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>")))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (and:INT1
+ (not:INT1 (match_operand:INT1 1 "memory_operand" ""))
+ (match_operand:INT1 2 "gpc_reg_operand" "")))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[1]),
+ operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (and:GPR
+ (not:GPR (match_operand:GPR 1 "memory_operand" "+Z"))
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+; Variants of and<mode> without the cr0 clobber, so that the atomic
+; splitters do not generate an additional clobber (which would cause an
+; internal consistency failure); cr0 is already clobbered by larx/stcx.
+(define_insn "*atomic_andsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")]
+ UNSPEC_AND))]
+ ""
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
+
+(define_insn "*atomic_anddi"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:DI 2 "and_operand" "?r,S,T,K,J")]
+ UNSPEC_AND))]
+ "TARGET_POWERPC64"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2"
+ [(set_attr "type" "*,*,*,compare,compare")
+ (set_attr "length" "4,4,4,4,4")])
+
+; the sync_*_internal patterns all have these operands:
+; 0 - memory location
+; 1 - operand
+; 2 - value in memory after operation
+; 3 - value in memory immediately before operation
+
+(define_insn "*sync_addshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (plus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (plus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tadd%I1 %2,%3,%1\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_subshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (minus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (minus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tsubf %2,%1,%3\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_andsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (and:SI (match_operand:SI 0 "memory_operand" "+Z,Z,Z,Z")
+ (match_operand:SI 1 "and_operand" "r,T,K,L")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b,&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(and:SI (match_dup 0) (match_dup 1))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 4 "=&x,&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\tand %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\trlwinm %2,%3,0,%m1,%M1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandi. %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandis. %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16,16")])
+
+(define_insn "*sync_boolsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r")
+ (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 0 "memory_operand" "+Z,Z,Z")
+ (match_operand:SI 1 "logical_operand" "r,K,L")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\t%q4 %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4i %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4is %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16")])
+
+; This pattern could also take immediate values of operand 1,
+; since the non-NOT version of the operator is used; but this is not
+; very useful, since in practice operand 1 is a full 32-bit value.
+; Likewise, operand 5 is in practice either <= 2^16 or it is a register.
+(define_insn "*sync_boolcshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (match_operator:SI 4 "boolean_operator"
+ [(xor:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 5 "logical_operand" "rK"))
+ (match_operand:SI 1 "gpc_reg_operand" "r")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\txor%I2 %2,%3,%5\n\t%q4 %2,%2,%1\n\tstwcx. %2,%y0\n\tbne- $-16"
+ [(set_attr "length" "20")])
+
+(define_insn "isync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_ISYNC))]
+ ""
+ "{ics|isync}"
+ [(set_attr "type" "isync")])
+
+(define_expand "sync_lock_release<mode>"
+ [(set (match_operand:INT 0 "memory_operand")
+ (match_operand:INT 1 "any_operand"))]
+ ""
+ "
+{
+ emit_insn (gen_lwsync ());
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+}")
+
+; Some AIX assemblers don't accept lwsync, so we use a .long.
+(define_insn "lwsync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_LWSYNC))]
+ ""
+{
+ if (TARGET_NO_LWSYNC)
+ return "sync";
+ else
+ return ".long 0x7c2004ac";
+}
+ [(set_attr "type" "sync")])
+
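The .long 0x7c2004ac in the lwsync pattern above is the instruction's binary encoding, emitted raw because, as the comment notes, some AIX assemblers reject the lwsync mnemonic; targets with TARGET_NO_LWSYNC fall back to a full sync instead. The equivalent hand-written barrier, as a sketch:

    static inline void
    lwsync_barrier (void)
    {
      /* Same word the compiler emits; plain "lwsync" where the
         assembler accepts the mnemonic.  */
      __asm__ __volatile__ (".long 0x7c2004ac" ::: "memory");
    }
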
diff --git a/contrib/gcc/config/rs6000/sysv4.h b/contrib/gcc/config/rs6000/sysv4.h
index d367aa8..0adde2b 100644
--- a/contrib/gcc/config/rs6000/sysv4.h
+++ b/contrib/gcc/config/rs6000/sysv4.h
@@ -1,6 +1,6 @@
/* Target definitions for GNU compiler for PowerPC running System V.4
Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
- 2004, 2005 Free Software Foundation, Inc.
+ 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
Contributed by Cygnus Support.
This file is part of GCC.
@@ -17,10 +17,11 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
/* Header files should be C++ aware in general. */
+#undef NO_IMPLICIT_EXTERN_C
#define NO_IMPLICIT_EXTERN_C
/* Yes! We are ELF. */
@@ -46,24 +47,6 @@ enum rs6000_sdata_type {
extern enum rs6000_sdata_type rs6000_sdata;
-/* V.4/eabi switches. */
-#define MASK_NO_BITFIELD_TYPE 0x40000000 /* Set PCC_BITFIELD_TYPE_MATTERS to 0. */
-#define MASK_STRICT_ALIGN 0x20000000 /* Set STRICT_ALIGNMENT to 1. */
-#define MASK_RELOCATABLE 0x10000000 /* GOT pointers are PC relative. */
-#define MASK_EABI 0x08000000 /* Adhere to eabi, not System V spec. */
-#define MASK_LITTLE_ENDIAN 0x04000000 /* Target is little endian. */
-#define MASK_REGNAMES 0x02000000 /* Use alternate register names. */
-#define MASK_PROTOTYPE 0x01000000 /* Only prototyped fcns pass variable args. */
-#define MASK_NO_BITFIELD_WORD 0x00800000 /* Bitfields cannot cross word boundaries */
-
-#define TARGET_NO_BITFIELD_TYPE (target_flags & MASK_NO_BITFIELD_TYPE)
-#define TARGET_STRICT_ALIGN (target_flags & MASK_STRICT_ALIGN)
-#define TARGET_RELOCATABLE (target_flags & MASK_RELOCATABLE)
-#define TARGET_EABI (target_flags & MASK_EABI)
-#define TARGET_LITTLE_ENDIAN (target_flags & MASK_LITTLE_ENDIAN)
-#define TARGET_REGNAMES (target_flags & MASK_REGNAMES)
-#define TARGET_PROTOTYPE (target_flags & MASK_PROTOTYPE)
-#define TARGET_NO_BITFIELD_WORD (target_flags & MASK_NO_BITFIELD_WORD)
#define TARGET_TOC ((target_flags & MASK_64BIT) \
|| ((target_flags & (MASK_RELOCATABLE \
| MASK_MINIMAL_TOC)) \
@@ -76,91 +59,17 @@ extern enum rs6000_sdata_type rs6000_sdata;
#define TARGET_NO_TOC (! TARGET_TOC)
#define TARGET_NO_EABI (! TARGET_EABI)
-/* Strings provided by SUBTARGET_OPTIONS */
+#ifdef HAVE_AS_REL16
+#undef TARGET_SECURE_PLT
+#define TARGET_SECURE_PLT secure_plt
+#endif
+
extern const char *rs6000_abi_name;
extern const char *rs6000_sdata_name;
extern const char *rs6000_tls_size_string; /* For -mtls-size= */
-/* Override rs6000.h definition. */
-#undef SUBTARGET_OPTIONS
-#define SUBTARGET_OPTIONS \
- { "call-", &rs6000_abi_name, N_("Select ABI calling convention"), 0}, \
- { "sdata=", &rs6000_sdata_name, N_("Select method for sdata handling"), 0}, \
- { "tls-size=", &rs6000_tls_size_string, \
- N_("Specify bit size of immediate TLS offsets"), 0 }
-
#define SDATA_DEFAULT_SIZE 8
-/* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be just
- the same as -mminimal-toc. */
-/* Override rs6000.h definition. */
-#undef SUBTARGET_SWITCHES
-#define SUBTARGET_SWITCHES \
- { "bit-align", -MASK_NO_BITFIELD_TYPE, \
- N_("Align to the base type of the bit-field") }, \
- { "no-bit-align", MASK_NO_BITFIELD_TYPE, \
- N_("Don't align to the base type of the bit-field") }, \
- { "strict-align", MASK_STRICT_ALIGN, \
- N_("Don't assume that unaligned accesses are handled by the system") }, \
- { "no-strict-align", -MASK_STRICT_ALIGN, \
- N_("Assume that unaligned accesses are handled by the system") }, \
- { "relocatable", MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC, \
- N_("Produce code relocatable at runtime") }, \
- { "no-relocatable", -MASK_RELOCATABLE, \
- N_("Don't produce code relocatable at runtime") }, \
- { "relocatable-lib", MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC, \
- N_("Produce code relocatable at runtime") }, \
- { "no-relocatable-lib", -MASK_RELOCATABLE, \
- N_("Don't produce code relocatable at runtime") }, \
- { "little-endian", MASK_LITTLE_ENDIAN, \
- N_("Produce little endian code") }, \
- { "little", MASK_LITTLE_ENDIAN, \
- N_("Produce little endian code") }, \
- { "big-endian", -MASK_LITTLE_ENDIAN, \
- N_("Produce big endian code") }, \
- { "big", -MASK_LITTLE_ENDIAN, \
- N_("Produce big endian code") }, \
- { "no-toc", 0, N_("no description yet") }, \
- { "toc", MASK_MINIMAL_TOC, N_("no description yet") }, \
- { "full-toc", MASK_MINIMAL_TOC, N_("no description yet") }, \
- { "prototype", MASK_PROTOTYPE, \
- N_("Assume all variable arg functions are prototyped") }, \
- { "no-prototype", -MASK_PROTOTYPE, \
- N_("Non-prototyped functions might take a variable number of args") }, \
- { "no-traceback", 0, N_("no description yet") }, \
- { "eabi", MASK_EABI, N_("Use EABI") }, \
- { "no-eabi", -MASK_EABI, N_("Don't use EABI") }, \
- { "bit-word", -MASK_NO_BITFIELD_WORD, "" }, \
- { "no-bit-word", MASK_NO_BITFIELD_WORD, \
- N_("Do not allow bit-fields to cross word boundaries") }, \
- { "regnames", MASK_REGNAMES, \
- N_("Use alternate register names") }, \
- { "no-regnames", -MASK_REGNAMES, \
- N_("Don't use alternate register names") }, \
- { "sdata", 0, N_("no description yet") }, \
- { "no-sdata", 0, N_("no description yet") }, \
- { "sim", 0, \
- N_("Link with libsim.a, libc.a and sim-crt0.o") }, \
- { "ads", 0, \
- N_("Link with libads.a, libc.a and crt0.o") }, \
- { "yellowknife", 0, \
- N_("Link with libyk.a, libc.a and crt0.o") }, \
- { "mvme", 0, \
- N_("Link with libmvme.a, libc.a and crt0.o") }, \
- { "emb", 0, \
- N_("Set the PPC_EMB bit in the ELF flags header") }, \
- { "windiss", 0, N_("Use the WindISS simulator") }, \
- { "shlib", 0, N_("no description yet") }, \
- { "64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
- N_("Generate 64-bit code") }, \
- { "32", - (MASK_64BIT | MASK_POWERPC64), \
- N_("Generate 32-bit code") }, \
- EXTRA_SUBTARGET_SWITCHES \
- { "newlib", 0, N_("no description yet") },
-
-/* This is meant to be redefined in the host dependent files. */
-#define EXTRA_SUBTARGET_SWITCHES
-
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
`OVERRIDE_OPTIONS' to take account of this. This macro, if
@@ -196,7 +105,12 @@ do { \
else if (!strcmp (rs6000_abi_name, "freebsd")) \
rs6000_current_abi = ABI_V4; \
else if (!strcmp (rs6000_abi_name, "linux")) \
- rs6000_current_abi = ABI_V4; \
+ { \
+ if (TARGET_64BIT) \
+ rs6000_current_abi = ABI_AIX; \
+ else \
+ rs6000_current_abi = ABI_V4; \
+ } \
else if (!strcmp (rs6000_abi_name, "gnu")) \
rs6000_current_abi = ABI_V4; \
else if (!strcmp (rs6000_abi_name, "netbsd")) \
@@ -296,9 +210,17 @@ do { \
error ("-mcall-aixdesc must be big endian"); \
} \
\
+ if (TARGET_SECURE_PLT != secure_plt) \
+ { \
+ error ("-msecure-plt not supported by your assembler"); \
+ } \
+ \
/* Treat -fPIC the same as -mrelocatable. */ \
if (flag_pic > 1 && DEFAULT_ABI != ABI_AIX) \
- target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC; \
+ { \
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC; \
+ TARGET_NO_FP_IN_TOC = 1; \
+ } \
\
else if (TARGET_RELOCATABLE) \
flag_pic = 2; \
@@ -321,16 +243,15 @@ do { \
#undef PROCESSOR_DEFAULT
#define PROCESSOR_DEFAULT PROCESSOR_PPC750
+/* SVR4 only defined for PowerPC, so short-circuit POWER patterns. */
+#undef TARGET_POWER
+#define TARGET_POWER 0
+
#define FIXED_R2 1
/* System V.4 uses register 13 as a pointer to the small data area,
so it is not available to the normal user. */
#define FIXED_R13 1
-/* Size of the V.4 varargs area if needed. */
-/* Override rs6000.h definition. */
-#undef RS6000_VARARGS_AREA
-#define RS6000_VARARGS_AREA ((cfun->machine->sysv_varargs_p) ? RS6000_VARARGS_SIZE : 0)
-
/* Override default big endianism definitions in rs6000.h. */
#undef BYTES_BIG_ENDIAN
#undef WORDS_BIG_ENDIAN
@@ -412,15 +333,6 @@ do { \
((TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
? 128 : COMPUTED)
-/* Define this macro as an expression for the alignment of a type
- (given by TYPE as a tree node) if the alignment computed in the
- usual way is COMPUTED and the alignment explicitly specified was
- SPECIFIED. */
-#define ROUND_TYPE_ALIGN(TYPE, COMPUTED, SPECIFIED) \
- ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) \
- ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
- : MAX (COMPUTED, SPECIFIED))
-
#undef BIGGEST_FIELD_ALIGNMENT
/* Use ELF style section commands. */
@@ -450,131 +362,12 @@ do { \
#define SDATA2_SECTION_ASM_OP "\t.section\t\".sdata2\",\"a\""
#define SBSS_SECTION_ASM_OP "\t.section\t\".sbss\",\"aw\",@nobits"
-/* Besides the usual ELF sections, we need a toc section. */
-/* Override elfos.h definition. */
-#undef EXTRA_SECTIONS
-#define EXTRA_SECTIONS in_toc, in_sdata, in_sdata2, in_sbss, in_init, in_fini
-
-/* Override elfos.h definition. */
-#undef EXTRA_SECTION_FUNCTIONS
-#define EXTRA_SECTION_FUNCTIONS \
- TOC_SECTION_FUNCTION \
- SDATA_SECTION_FUNCTION \
- SDATA2_SECTION_FUNCTION \
- SBSS_SECTION_FUNCTION \
- INIT_SECTION_FUNCTION \
- FINI_SECTION_FUNCTION
-
-#define TOC_SECTION_FUNCTION \
-void \
-toc_section (void) \
-{ \
- if (in_section != in_toc) \
- { \
- in_section = in_toc; \
- if (DEFAULT_ABI == ABI_AIX \
- && TARGET_MINIMAL_TOC \
- && !TARGET_RELOCATABLE) \
- { \
- if (! toc_initialized) \
- { \
- toc_initialized = 1; \
- fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP); \
- (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0); \
- fprintf (asm_out_file, "\t.tc "); \
- ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],"); \
- ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
- fprintf (asm_out_file, "\n"); \
- \
- fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
- ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
- fprintf (asm_out_file, " = .+32768\n"); \
- } \
- else \
- fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
- } \
- else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE) \
- fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP); \
- else \
- { \
- fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
- if (! toc_initialized) \
- { \
- ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
- fprintf (asm_out_file, " = .+32768\n"); \
- toc_initialized = 1; \
- } \
- } \
- } \
-} \
- \
-extern int in_toc_section (void); \
-int in_toc_section (void) \
-{ \
- return in_section == in_toc; \
-}
-
-#define SDATA_SECTION_FUNCTION \
-void \
-sdata_section (void) \
-{ \
- if (in_section != in_sdata) \
- { \
- in_section = in_sdata; \
- fprintf (asm_out_file, "%s\n", SDATA_SECTION_ASM_OP); \
- } \
-}
-
-#define SDATA2_SECTION_FUNCTION \
-void \
-sdata2_section (void) \
-{ \
- if (in_section != in_sdata2) \
- { \
- in_section = in_sdata2; \
- fprintf (asm_out_file, "%s\n", SDATA2_SECTION_ASM_OP); \
- } \
-}
-
-#define SBSS_SECTION_FUNCTION \
-void \
-sbss_section (void) \
-{ \
- if (in_section != in_sbss) \
- { \
- in_section = in_sbss; \
- fprintf (asm_out_file, "%s\n", SBSS_SECTION_ASM_OP); \
- } \
-}
-
-#define INIT_SECTION_FUNCTION \
-void \
-init_section (void) \
-{ \
- if (in_section != in_init) \
- { \
- in_section = in_init; \
- fprintf (asm_out_file, "%s\n", INIT_SECTION_ASM_OP); \
- } \
-}
-
-#define FINI_SECTION_FUNCTION \
-void \
-fini_section (void) \
-{ \
- if (in_section != in_fini) \
- { \
- in_section = in_fini; \
- fprintf (asm_out_file, "%s\n", FINI_SECTION_ASM_OP); \
- } \
-}
-
/* Override default elf definitions. */
+#define TARGET_ASM_INIT_SECTIONS rs6000_elf_asm_init_sections
+#undef TARGET_ASM_RELOC_RW_MASK
+#define TARGET_ASM_RELOC_RW_MASK rs6000_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION rs6000_elf_select_rtx_section
-#undef TARGET_ASM_SELECT_SECTION
-#define TARGET_ASM_SELECT_SECTION rs6000_elf_select_section
-#define TARGET_ASM_UNIQUE_SECTION rs6000_elf_unique_section
/* Return nonzero if this entry is to be written into the constant pool
in a special way. We do so if this is a SYMBOL_REF, LABEL_REF or a CONST
@@ -599,7 +392,7 @@ fini_section (void) \
|| (!TARGET_NO_FP_IN_TOC \
&& !TARGET_RELOCATABLE \
&& GET_CODE (X) == CONST_DOUBLE \
- && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& BITS_PER_WORD == HOST_BITS_PER_INT)))
/* These macros generate the special .type and .size directives which
@@ -641,14 +434,12 @@ extern int rs6000_pic_labelno;
#define LCOMM_ASM_OP "\t.lcomm\t"
-/* Override elfos.h definition. */
-#undef ASM_OUTPUT_ALIGNED_LOCAL
-#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+/* Describe how to emit uninitialized local items. */
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
do { \
- if (rs6000_sdata != SDATA_NONE && (SIZE) > 0 \
- && (SIZE) <= g_switch_value) \
+ if ((DECL) && rs6000_elf_in_small_data_p (DECL)) \
{ \
- sbss_section (); \
+ switch_to_section (sbss_section); \
ASM_OUTPUT_ALIGN (FILE, exact_log2 (ALIGN / BITS_PER_UNIT)); \
ASM_OUTPUT_LABEL (FILE, NAME); \
ASM_OUTPUT_SKIP (FILE, SIZE); \
@@ -668,7 +459,7 @@ do { \
/* Describe how to emit uninitialized external linkage items. */
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
do { \
- ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
+ ASM_OUTPUT_ALIGNED_DECL_LOCAL (FILE, DECL, NAME, SIZE, ALIGN); \
} while (0)
#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
@@ -739,23 +530,8 @@ extern int fixuplabelno;
/* Historically we have also supported stabs debugging. */
#define DBX_DEBUGGING_INFO 1
-#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO)
-
-/* Map register numbers held in the call frame info that gcc has
- collected using DWARF_FRAME_REGNUM to those that should be output in
- .debug_frame and .eh_frame. We continue to use gcc hard reg numbers
- for .eh_frame, but use the numbers mandated by the various ABIs for
- .debug_frame. rs6000_emit_prologue has translated any combination of
- CR2, CR3, CR4 saves to a save of CR2. The actual code emitted saves
- the whole of CR, so we map CR2_REGNO to the DWARF reg for CR. */
-#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) \
- ((FOR_EH) ? (REGNO) \
- : (REGNO) == CR2_REGNO ? 64 \
- : DBX_REGISTER_NUMBER (REGNO))
-
#define TARGET_ENCODE_SECTION_INFO rs6000_elf_encode_section_info
#define TARGET_IN_SMALL_DATA_P rs6000_elf_in_small_data_p
-#define TARGET_SECTION_TYPE_FLAGS rs6000_elf_section_type_flags
/* The ELF version doesn't encode [DS] or whatever at the end of symbols. */
@@ -773,38 +549,28 @@ extern int fixuplabelno;
#define TARGET_VERSION fprintf (stderr, " (PowerPC System V.4)");
#endif
-#define TARGET_OS_SYSV_CPP_BUILTINS() \
- do \
- { \
- if (flag_pic == 1) \
- { \
- builtin_define ("__pic__=1"); \
- builtin_define ("__PIC__=1"); \
- } \
- else if (flag_pic == 2) \
- { \
- builtin_define ("__pic__=2"); \
- builtin_define ("__PIC__=2"); \
- } \
- if (target_flags_explicit \
- & MASK_RELOCATABLE) \
- builtin_define ("_RELOCATABLE"); \
- } \
+#define TARGET_OS_SYSV_CPP_BUILTINS() \
+ do \
+ { \
+ if (target_flags_explicit \
+ & MASK_RELOCATABLE) \
+ builtin_define ("_RELOCATABLE"); \
+ } \
while (0)
#ifndef TARGET_OS_CPP_BUILTINS
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- builtin_define_std ("PPC"); \
- builtin_define_std ("unix"); \
- builtin_define ("__svr4__"); \
- builtin_assert ("system=unix"); \
- builtin_assert ("system=svr4"); \
- builtin_assert ("cpu=powerpc"); \
- builtin_assert ("machine=powerpc"); \
- TARGET_OS_SYSV_CPP_BUILTINS (); \
- } \
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define_std ("unix"); \
+ builtin_define ("__svr4__"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=svr4"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
while (0)
#endif
@@ -837,6 +603,10 @@ extern int fixuplabelno;
#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big)"
+#ifndef CC1_SECURE_PLT_DEFAULT_SPEC
+#define CC1_SECURE_PLT_DEFAULT_SPEC ""
+#endif
+
/* Pass -G xxx to the compiler and set correct endian mode. */
#define CC1_SPEC "%{G*} \
%{mlittle|mlittle-endian: %(cc1_endian_little); \
@@ -849,7 +619,6 @@ extern int fixuplabelno;
mcall-gnu : -mbig %(cc1_endian_big); \
mcall-i960-old : -mlittle %(cc1_endian_little); \
: %(cc1_endian_default)} \
-%{mno-sdata: -msdata=none } \
%{meabi: %{!mcall-*: -mcall-sysv }} \
%{!meabi: %{!mno-eabi: \
%{mrelocatable: -meabi } \
@@ -861,6 +630,7 @@ extern int fixuplabelno;
%{mcall-openbsd: -mno-eabi }}} \
%{msdata: -msdata=default} \
%{mno-sdata: -msdata=none} \
+%{!mbss-plt: %{!msecure-plt: %(cc1_secure_plt_default)}} \
%{profile: -p}"
/* Don't put -Y P,<path> for cross compilers. */
@@ -1089,7 +859,6 @@ extern int fixuplabelno;
#define LINK_OS_FREEBSD_SPEC "\
%{p:%nconsider using `-pg' instead of `-p' with gprof(1)} \
- %{Wl,*:%*} \
%{v:-V} \
%{assert*} %{R*} %{rpath*} %{defsym*} \
%{shared:-Bshareable %{h*} %{soname*}} \
@@ -1107,12 +876,12 @@ extern int fixuplabelno;
#ifdef HAVE_LD_PIE
#define STARTFILE_LINUX_SPEC "\
-%{!shared: %{pg|p:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
+%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
%{mnewlib:ecrti.o%s;:crti.o%s} \
%{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
#else
#define STARTFILE_LINUX_SPEC "\
-%{!shared: %{pg|p:gcrt1.o%s;:crt1.o%s}} \
+%{!shared: %{pg|p|profile:gcrt1.o%s;:crt1.o%s}} \
%{mnewlib:ecrti.o%s;:crti.o%s} \
%{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
#endif
@@ -1123,9 +892,19 @@ extern int fixuplabelno;
#define LINK_START_LINUX_SPEC ""
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+#define LINUX_DYNAMIC_LINKER \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER)
+
#define LINK_OS_LINUX_SPEC "-m elf32ppclinux %{!shared: %{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "}}}"
#if defined(HAVE_LD_EH_FRAME_HDR)
# define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
@@ -1237,7 +1016,7 @@ ncrtn.o%s"
/* Override rs6000.h definition. */
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
- { "crtsavres_default", CRTSAVRES_DEFAULT_SPEC }, \
+ { "crtsavres_default", CRTSAVRES_DEFAULT_SPEC }, \
{ "lib_ads", LIB_ADS_SPEC }, \
{ "lib_yellowknife", LIB_YELLOWKNIFE_SPEC }, \
{ "lib_mvme", LIB_MVME_SPEC }, \
@@ -1247,7 +1026,7 @@ ncrtn.o%s"
{ "lib_linux", LIB_LINUX_SPEC }, \
{ "lib_netbsd", LIB_NETBSD_SPEC }, \
{ "lib_openbsd", LIB_OPENBSD_SPEC }, \
- { "lib_windiss", LIB_WINDISS_SPEC }, \
+ { "lib_windiss", LIB_WINDISS_SPEC }, \
{ "lib_default", LIB_DEFAULT_SPEC }, \
{ "startfile_ads", STARTFILE_ADS_SPEC }, \
{ "startfile_yellowknife", STARTFILE_YELLOWKNIFE_SPEC }, \
@@ -1258,7 +1037,7 @@ ncrtn.o%s"
{ "startfile_linux", STARTFILE_LINUX_SPEC }, \
{ "startfile_netbsd", STARTFILE_NETBSD_SPEC }, \
{ "startfile_openbsd", STARTFILE_OPENBSD_SPEC }, \
- { "startfile_windiss", STARTFILE_WINDISS_SPEC }, \
+ { "startfile_windiss", STARTFILE_WINDISS_SPEC }, \
{ "startfile_default", STARTFILE_DEFAULT_SPEC }, \
{ "endfile_ads", ENDFILE_ADS_SPEC }, \
{ "endfile_yellowknife", ENDFILE_YELLOWKNIFE_SPEC }, \
@@ -1269,7 +1048,7 @@ ncrtn.o%s"
{ "endfile_linux", ENDFILE_LINUX_SPEC }, \
{ "endfile_netbsd", ENDFILE_NETBSD_SPEC }, \
{ "endfile_openbsd", ENDFILE_OPENBSD_SPEC }, \
- { "endfile_windiss", ENDFILE_WINDISS_SPEC }, \
+ { "endfile_windiss", ENDFILE_WINDISS_SPEC }, \
{ "endfile_default", ENDFILE_DEFAULT_SPEC }, \
{ "link_path", LINK_PATH_SPEC }, \
{ "link_shlib", LINK_SHLIB_SPEC }, \
@@ -1301,6 +1080,7 @@ ncrtn.o%s"
{ "cc1_endian_big", CC1_ENDIAN_BIG_SPEC }, \
{ "cc1_endian_little", CC1_ENDIAN_LITTLE_SPEC }, \
{ "cc1_endian_default", CC1_ENDIAN_DEFAULT_SPEC }, \
+ { "cc1_secure_plt_default", CC1_SECURE_PLT_DEFAULT_SPEC }, \
{ "cpp_os_ads", CPP_OS_ADS_SPEC }, \
{ "cpp_os_yellowknife", CPP_OS_YELLOWKNIFE_SPEC }, \
{ "cpp_os_mvme", CPP_OS_MVME_SPEC }, \
@@ -1310,7 +1090,7 @@ ncrtn.o%s"
{ "cpp_os_linux", CPP_OS_LINUX_SPEC }, \
{ "cpp_os_netbsd", CPP_OS_NETBSD_SPEC }, \
{ "cpp_os_openbsd", CPP_OS_OPENBSD_SPEC }, \
- { "cpp_os_windiss", CPP_OS_WINDISS_SPEC }, \
+ { "cpp_os_windiss", CPP_OS_WINDISS_SPEC }, \
{ "cpp_os_default", CPP_OS_DEFAULT_SPEC }, \
{ "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER }, \
SUBSUBTARGET_EXTRA_SPECS
@@ -1360,3 +1140,6 @@ ncrtn.o%s"
/* Generate entries in .fixup for relocatable addresses. */
#define RELOCATABLE_NEEDS_FIXUP 1
+
+/* This target uses the sysv4.opt file. */
+#define TARGET_USES_SYSV4_OPT 1
diff --git a/contrib/gcc/config/rs6000/sysv4.opt b/contrib/gcc/config/rs6000/sysv4.opt
new file mode 100644
index 0000000..c483ea5
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sysv4.opt
@@ -0,0 +1,149 @@
+; SYSV4 options for PPC port.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mcall-
+Target RejectNegative Joined
+Select ABI calling convention
+
+msdata=
+Target RejectNegative Joined
+Select method for sdata handling
+
+mtls-size=
+Target RejectNegative Joined
+Specify bit size of immediate TLS offsets
+
+mbit-align
+Target Report Mask(NO_BITFIELD_TYPE)
+Align to the base type of the bit-field
+
+mstrict-align
+Target Report Mask(STRICT_ALIGN)
+Don't assume that unaligned accesses are handled by the system
+
+mrelocatable
+Target Report Mask(RELOCATABLE)
+Produce code relocatable at runtime
+
+mrelocatable-lib
+Target
+Produce code relocatable at runtime
+
+mlittle-endian
+Target Report RejectNegative Mask(LITTLE_ENDIAN)
+Produce little endian code
+
+mlittle
+Target Report RejectNegative Mask(LITTLE_ENDIAN) MaskExists
+Produce little endian code
+
+mbig-endian
+Target Report RejectNegative InverseMask(LITTLE_ENDIAN)
+Produce big endian code
+
+mbig
+Target Report RejectNegative InverseMask(LITTLE_ENDIAN)
+Produce big endian code
+
+;; FIXME: This does nothing. What should be done?
+mno-toc
+Target RejectNegative
+no description yet
+
+mtoc
+Target RejectNegative
+no description yet
+
+mprototype
+Target Mask(PROTOTYPE)
+Assume all variable arg functions are prototyped
+
+;; FIXME: Does nothing.
+mno-traceback
+Target RejectNegative
+no description yet
+
+meabi
+Target Report Mask(EABI)
+Use EABI
+
+mbit-word
+Target Report Mask(NO_BITFIELD_WORD)
+Allow bit-fields to cross word boundaries
+
+mregnames
+Target Mask(REGNAMES)
+Use alternate register names
+
+;; FIXME: Does nothing.
+msdata
+Target
+no description yet
+
+msim
+Target RejectNegative
+Link with libsim.a, libc.a and sim-crt0.o
+
+mads
+Target RejectNegative
+Link with libads.a, libc.a and crt0.o
+
+myellowknife
+Target RejectNegative
+Link with libyk.a, libc.a and crt0.o
+
+mmvme
+Target RejectNegative
+Link with libmvme.a, libc.a and crt0.o
+
+memb
+Target RejectNegative
+Set the PPC_EMB bit in the ELF flags header
+
+mwindiss
+Target RejectNegative
+Use the WindISS simulator
+
+mshlib
+Target RejectNegative
+no description yet
+
+m64
+Target Report RejectNegative Mask(64BIT)
+Generate 64-bit code
+
+m32
+Target Report RejectNegative InverseMask(64BIT)
+Generate 32-bit code
+
+mnewlib
+Target RejectNegative
+no description yet
+
+msecure-plt
+Target Report RejectNegative Var(secure_plt, 1)
+Generate code to use a non-exec PLT and GOT
+
+mbss-plt
+Target Report RejectNegative Var(secure_plt, 0)
+Generate code for old exec BSS PLT
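
Each record above replaces a hand-maintained entry in the SUBTARGET_SWITCHES/SUBTARGET_OPTIONS tables deleted from sysv4.h earlier in this patch. A Mask(NAME) property makes the options generator synthesize the MASK_NAME bit and its TARGET_NAME accessor, which is why the explicit MASK_*/TARGET_* defines could be dropped from the header. Roughly, the generated options.h content has this shape (a sketch of the generated form, not copied output; the bit position N is assigned by the generator):

    /* From the "mrelocatable ... Mask(RELOCATABLE)" record above.  */
    #define MASK_RELOCATABLE (1 << N)
    #define TARGET_RELOCATABLE ((target_flags & MASK_RELOCATABLE) != 0)
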
diff --git a/contrib/gcc/config/rs6000/sysv4le.h b/contrib/gcc/config/rs6000/sysv4le.h
index 38be8a6..b6964bc 100644
--- a/contrib/gcc/config/rs6000/sysv4le.h
+++ b/contrib/gcc/config/rs6000/sysv4le.h
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_LITTLE_ENDIAN)
diff --git a/contrib/gcc/config/rs6000/t-aix43 b/contrib/gcc/config/rs6000/t-aix43
index 8c2592f..6a73cdd 100644
--- a/contrib/gcc/config/rs6000/t-aix43
+++ b/contrib/gcc/config/rs6000/t-aix43
@@ -46,29 +46,31 @@ SHLIB_EXT = .a
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
@multilib_flags@ @shlib_objs@ -lc \
- `case @shlib_base_name@ in \
+ `case @multilib_dir@ in \
*pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
*) echo -lc ;; esac` ; \
- rm -f tmp-@shlib_base_name@.a ; \
- $(AR_CREATE_FOR_TARGET) tmp-@shlib_base_name@.a @multilib_dir@/shr.o ; \
- mv tmp-@shlib_base_name@.a @shlib_base_name@.a ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
rm -f @multilib_dir@/shr.o
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
-SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
# GCC 128-bit long double support routines.
-LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+TARGET_LIBGCC2_CFLAGS = -mlong-double-128
# Either 32-bit and 64-bit objects in archives.
AR_FLAGS_FOR_TARGET = -X32_64
-# Compile Ada files with minimal-toc. The primary focus is gnatlib, so
-# that the library does not use nearly the entire TOC of applications
-# until gnatlib is built as a shared library on AIX. Compiling the
-# compiler with -mminimal-toc does not cause any harm.
-T_ADAFLAGS = -mminimal-toc
diff --git a/contrib/gcc/config/rs6000/t-aix52 b/contrib/gcc/config/rs6000/t-aix52
index 839bd0a..37a5d83 100644
--- a/contrib/gcc/config/rs6000/t-aix52
+++ b/contrib/gcc/config/rs6000/t-aix52
@@ -27,29 +27,31 @@ SHLIB_EXT = .a
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
@multilib_flags@ @shlib_objs@ -lc \
- `case @shlib_base_name@ in \
+ `case @multilib_dir@ in \
*pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
*) echo -lc ;; esac` ; \
- rm -f tmp-@shlib_base_name@.a ; \
- $(AR_CREATE_FOR_TARGET) tmp-@shlib_base_name@.a @multilib_dir@/shr.o ; \
- mv tmp-@shlib_base_name@.a @shlib_base_name@.a ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
rm -f @multilib_dir@/shr.o
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
-SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
# GCC 128-bit long double support routines.
-LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+TARGET_LIBGCC2_CFLAGS = -mlong-double-128
# Either 32-bit and 64-bit objects in archives.
AR_FLAGS_FOR_TARGET = -X32_64
-# Compile Ada files with minimal-toc. The primary focus is gnatlib, so
-# that the library does not use nearly the entire TOC of applications
-# until gnatlib is built as a shared library on AIX. Compiling the
-# compiler with -mminimal-toc does not cause any harm.
-T_ADAFLAGS = -mminimal-toc
diff --git a/contrib/gcc/config/rs6000/t-beos b/contrib/gcc/config/rs6000/t-beos
index badffef..dc3b89d 100644
--- a/contrib/gcc/config/rs6000/t-beos
+++ b/contrib/gcc/config/rs6000/t-beos
@@ -28,6 +28,3 @@ EXTRA_PARTS = milli.exp
milli.exp: $(srcdir)/config/rs6000/milli.exp
rm -f milli.exp
cp $(srcdir)/config/rs6000/milli.exp ./milli.exp
-
-# Don't use collect.
-USE_COLLECT2 =
diff --git a/contrib/gcc/config/rs6000/t-darwin b/contrib/gcc/config/rs6000/t-darwin
index 185bb00..5ef2872 100644
--- a/contrib/gcc/config/rs6000/t-darwin
+++ b/contrib/gcc/config/rs6000/t-darwin
@@ -1,7 +1,37 @@
-# Add trampoline and long double support to libgcc.
LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \
- $(srcdir)/config/rs6000/darwin-ldouble.c
+ $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/darwin-64.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c \
+ $(srcdir)/config/rs6000/darwin-world.asm
-# For libgcc, we always want 128-bit long double, since a libgcc built with
-# that will work without it.
-TARGET_LIBGCC2_CFLAGS = -mlong-double-128
+LIB2FUNCS_STATIC_EXTRA = \
+ $(srcdir)/config/rs6000/darwin-fpsave.asm \
+ $(srcdir)/config/rs6000/darwin-vecsave.asm
+
+DARWIN_EXTRA_CRT_BUILD_CFLAGS = -mlongcall
+
+# The .asm files above are designed to run on all processors,
+# even though they use AltiVec instructions. -Wa is used because
+# -force_cpusubtype_ALL doesn't work with -dynamiclib.
+#
+# -pipe because there's an assembler bug, 4077127, which causes
+# it to mishandle the first # directive, so temporary file names
+# leak into the stabs debug info and break the bootstrap. Using -pipe
+# works around this, since no temporary file names are involved.
+TARGET_LIBGCC2_CFLAGS = -Wa,-force_cpusubtype_ALL -pipe -mmacosx-version-min=10.4
+
+# Export the _xlq* symbols from darwin-ldouble.c.
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
+
+SHLIB_VERPFX = $(srcdir)/config/rs6000/darwin-libgcc
+
+LIB2ADDEH += $(srcdir)/config/rs6000/darwin-fallback.c
+
+darwin-fpsave.o: $(srcdir)/config/rs6000/darwin-asm.h
+darwin-tramp.o: $(srcdir)/config/rs6000/darwin-asm.h
+
+# Explain how to build crt2.o
+$(T)crt2$(objext): $(srcdir)/config/darwin-crt2.c $(GCC_PASSES) \
+ $(TCONFIG_H) stmp-int-hdrs tsystem.h
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) \
+ -c $(srcdir)/config/darwin-crt2.c -o $(T)crt2$(objext)
diff --git a/contrib/gcc/config/rs6000/t-darwin8 b/contrib/gcc/config/rs6000/t-darwin8
new file mode 100644
index 0000000..2f3bb32
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-darwin8
@@ -0,0 +1,3 @@
+# 64-bit libraries can only be built in Darwin 8.x or later.
+MULTILIB_OPTIONS = m64
+MULTILIB_DIRNAMES = ppc64
diff --git a/contrib/gcc/config/rs6000/t-fprules b/contrib/gcc/config/rs6000/t-fprules
index 4fb09a2..aa686c1 100644
--- a/contrib/gcc/config/rs6000/t-fprules
+++ b/contrib/gcc/config/rs6000/t-fprules
@@ -1,18 +1,7 @@
-# We want fine grained libraries, so use the new code to build the
-# floating point emulation libraries.
-FPBIT = fp-bit.c
-DPBIT = dp-bit.c
-
-dp-bit.c: $(srcdir)/config/fp-bit.c
- cat $(srcdir)/config/fp-bit.c > dp-bit.c
-
-fp-bit.c: $(srcdir)/config/fp-bit.c
- echo '#define FLOAT' > fp-bit.c
- cat $(srcdir)/config/fp-bit.c >> fp-bit.c
-
MULTILIB_MATCHES_FLOAT = msoft-float=mcpu?401 \
msoft-float=mcpu?403 \
msoft-float=mcpu?405 \
+ msoft-float=mcpu?440 \
msoft-float=mcpu?ec603e \
msoft-float=mcpu?801 \
msoft-float=mcpu?821 \
diff --git a/contrib/gcc/config/rs6000/t-fprules-fpbit b/contrib/gcc/config/rs6000/t-fprules-fpbit
new file mode 100644
index 0000000..a80c1cf
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-fprules-fpbit
@@ -0,0 +1,11 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
diff --git a/contrib/gcc/config/rs6000/t-fprules-softfp b/contrib/gcc/config/rs6000/t-fprules-softfp
new file mode 100644
index 0000000..10b271f
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-fprules-softfp
@@ -0,0 +1,6 @@
+softfp_float_modes := sf df
+softfp_int_modes := si di
+softfp_extensions := sfdf
+softfp_truncations := dfsf
+softfp_machine_header := rs6000/sfp-machine.h
+softfp_exclude_libgcc2 := y
diff --git a/contrib/gcc/config/rs6000/t-linux64 b/contrib/gcc/config/rs6000/t-linux64
index 6d1e6f4..a2c04f9 100644
--- a/contrib/gcc/config/rs6000/t-linux64
+++ b/contrib/gcc/config/rs6000/t-linux64
@@ -1,13 +1,11 @@
#rs6000/t-linux64
-LIB2FUNCS_EXTRA = tramp.S $(srcdir)/config/rs6000/ppc64-fp.c
-LIB2FUNCS_STATIC_EXTRA = eabi.S $(srcdir)/config/rs6000/darwin-ldouble.c
-LIB2FUNCS_SHARED_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble-shared.c
+LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_EXTRA := $(sort $(LIB2FUNCS_EXTRA))
-TARGET_LIBGCC2_CFLAGS = -mno-minimal-toc -fPIC -specs=bispecs
-
-SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
+TARGET_LIBGCC2_CFLAGS += -mno-minimal-toc
MULTILIB_OPTIONS = m64/m32 msoft-float
MULTILIB_DIRNAMES = 64 32 nof
@@ -17,29 +15,5 @@ MULTILIB_EXCLUSIONS = m64/!m32/msoft-float
MULTILIB_OSDIRNAMES = ../lib64 ../lib nof
MULTILIB_MATCHES = $(MULTILIB_MATCHES_FLOAT)
-# We want fine grained libraries, so use the new code to build the
-# floating point emulation libraries.
-# fp-bit is only to be used by 32-bit multilibs
-FPBIT = fp-bit32.c
-DPBIT = dp-bit32.c
-
-dp-bit32.c: $(srcdir)/config/fp-bit.c
- ( echo '#ifndef __powerpc64__'; \
- cat $(srcdir)/config/fp-bit.c; \
- echo '#endif' ) > dp-bit32.c
-
-fp-bit32.c: $(srcdir)/config/fp-bit.c
- ( echo '#ifndef __powerpc64__'; \
- echo '#define FLOAT'; \
- cat $(srcdir)/config/fp-bit.c; \
- echo '#endif' ) > fp-bit32.c
-
-# Hack to use -mlong-double-128 just for compiling 64 bit libgcc
-mklibgcc: bispecs
-
-bispecs: specs
- if [ x`$(GCC_FOR_TARGET) -print-multi-os-directory` = x../lib ]; then \
- sed -e '/cc1_options/{ n; s/$$/ %{m64:-mlong-double-128}/; }' < specs > $@; \
- else \
- sed -e '/cc1_options/{ n; s/$$/ %{!m32:-mlong-double-128}/; }' < specs > $@; \
- fi
+softfp_wrap_start := '\#ifndef __powerpc64__'
+softfp_wrap_end := '\#endif'
diff --git a/contrib/gcc/config/rs6000/t-lynx b/contrib/gcc/config/rs6000/t-lynx
new file mode 100644
index 0000000..429f641
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-lynx
@@ -0,0 +1,38 @@
+LIB2FUNCS_EXTRA = tramp.S
+
+tramp.S: $(srcdir)/config/rs6000/tramp.asm
+ cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+MULTILIB_OPTIONS += msoft-float
+MULTILIB_DIRNAMES += soft-float
+
+MULTILIB_OPTIONS += maltivec
+MULTILIB_DIRNAMES += altivec
+
+MULTILIB_EXCEPTIONS = *msoft-float/*maltivec*
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+# If .sdata is enabled __CTOR_{LIST,END}__ go into .sdata instead of
+# .ctors.
+CRTSTUFF_T_CFLAGS = -mno-sdata
+
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = -fPIC -mno-sdata
+
+Local Variables:
+mode: makefile
+End:
diff --git a/contrib/gcc/config/rs6000/t-ppccomm b/contrib/gcc/config/rs6000/t-ppccomm
index eaa3252..b4950f1 100644
--- a/contrib/gcc/config/rs6000/t-ppccomm
+++ b/contrib/gcc/config/rs6000/t-ppccomm
@@ -1,6 +1,6 @@
# Common support for PowerPC ELF targets (both EABI and SVR4).
-LIB2FUNCS_EXTRA = tramp.S
+LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/darwin-ldouble.c
# This one can't end up in shared libgcc
LIB2FUNCS_STATIC_EXTRA = eabi.S
@@ -11,6 +11,14 @@ eabi.S: $(srcdir)/config/rs6000/eabi.asm
tramp.S: $(srcdir)/config/rs6000/tramp.asm
cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+ifneq (,$(findstring gnu,$(target)))
+ifeq (,$(findstring spe,$(target)))
+TARGET_LIBGCC2_CFLAGS += -mlong-double-128
+
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc-glibc.ver
+endif
+endif
+
# Switch synonyms
MULTILIB_MATCHES_ENDIAN = mlittle=mlittle-endian mbig=mbig-endian
MULTILIB_MATCHES_SYSV = mcall-sysv=mcall-sysv-eabi mcall-sysv=mcall-sysv-noeabi mcall-sysv=mcall-linux mcall-sysv=mcall-netbsd
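[The new glibc-only block above turns on 128-bit long double, the IBM
double-double format implemented by the darwin-ldouble.c file added to
LIB2FUNCS_EXTRA, and exports the matching symbols through
libgcc-ppc-glibc.ver. A hedged sketch of what the format buys, assuming
powerpc*-linux-gnu with -mlong-double-128:]

#include <assert.h>

int main (void)
{
  /* Double-double stores a value as the sum of two doubles (high +
     low), so long double represents values plain double cannot:
     1 + 2^-60 needs more than 53 mantissa bits.  */
  long double fine = 1.0L + 0x1p-60L;
  assert (fine != 1.0L);           /* held exactly as 1.0 + 2^-60 */
  assert (1.0 + 0x1p-60 == 1.0);   /* rounds away in 53-bit double */
  assert (sizeof (long double) == 16);
  return 0;
}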
diff --git a/contrib/gcc/config/rs6000/t-rtems b/contrib/gcc/config/rs6000/t-rtems
index 05d9a26..b3db949 100644
--- a/contrib/gcc/config/rs6000/t-rtems
+++ b/contrib/gcc/config/rs6000/t-rtems
@@ -1,87 +1,66 @@
# Multilibs for powerpc RTEMS targets.
MULTILIB_OPTIONS = \
-mcpu=403/mcpu=505/mcpu=601/mcpu=602/mcpu=603/mcpu=603e/mcpu=604/mcpu=750/mcpu=821/mcpu=860 \
-Dmpc509/Dmpc8260 \
-D_OLD_EXCEPTIONS \
+mcpu=403/mcpu=505/mcpu=601/mcpu=603e/mcpu=604/mcpu=860/mcpu=7400 \
+Dmpc8260 \
msoft-float
MULTILIB_DIRNAMES = \
-m403 m505 m601 m602 m603 m603e m604 m750 m821 m860 \
-mpc509 \
+m403 m505 m601 m603e m604 m860 m7400 \
mpc8260 \
-roe \
nof
MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align
# MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
-MULTILIB_MATCHES = ${MULTILIB_MATCHES_ENDIAN} \
- ${MULTILIB_MATCHES_SYSV} \
- mcpu?505/Dmpc505=mcpu?505/Dmpc509
+MULTILIB_MATCHES =
+MULTILIB_MATCHES += ${MULTILIB_MATCHES_ENDIAN}
+MULTILIB_MATCHES += ${MULTILIB_MATCHES_SYSV}
+# Map 405 to 403
+MULTILIB_MATCHES += mcpu?403=mcpu?405
+# Map 602 and 603 to 603e
+MULTILIB_MATCHES += mcpu?603e=mcpu?602
+MULTILIB_MATCHES += mcpu?603e=mcpu?603
+# Map 801, 821, 823 to 860
+MULTILIB_MATCHES += mcpu?860=mcpu?801
+MULTILIB_MATCHES += mcpu?860=mcpu?821
+MULTILIB_MATCHES += mcpu?860=mcpu?823
+# Map 7450 to 7400
+MULTILIB_MATCHES += mcpu?7400=mcpu?7450
-#
-# RTEMS old/new-exceptions handling
-#
-# old-exception processing is depredicated, therefore
-#
-# * Cpu-variants supporting new exception processing are build
-# with new exception processing only
-# * Cpu-variants not having been ported to new exception processing are
-# build with old and new exception processing
-#
-
-# Cpu-variants supporting new exception processing only
-MULTILIB_NEW_EXCEPTIONS_ONLY = \
-*mcpu=505*/*D_OLD_EXCEPTIONS* \
-*mcpu=604*/*D_OLD_EXCEPTIONS* \
-*mcpu=750*/*D_OLD_EXCEPTIONS* \
-*mcpu=821*/*D_OLD_EXCEPTIONS* \
-*Dmpc8260*/*D_OLD_EXCEPTIONS* \
-*mcpu=860*/*D_OLD_EXCEPTIONS*
+# Map 750 to .
+MULTILIB_MATCHES += mcpu?750=
# Soft-float only, default implies msoft-float
# NOTE: Must match with MULTILIB_MATCHES_FLOAT and MULTILIB_MATCHES
MULTILIB_SOFTFLOAT_ONLY = \
-mcpu=403/*msoft-float* \
-mcpu=821/*msoft-float* \
-mcpu=860/*msoft-float*
+*mcpu=401/*msoft-float* \
+*mcpu=403/*msoft-float* \
+*mcpu=405/*msoft-float* \
+*mcpu=801/*msoft-float* \
+*mcpu=821/*msoft-float* \
+*mcpu=823/*msoft-float* \
+*mcpu=860/*msoft-float*
# Hard-float only, take out msoft-float
MULTILIB_HARDFLOAT_ONLY = \
-mcpu=505/*msoft-float*
+*mcpu=505/*msoft-float*
MULTILIB_EXCEPTIONS =
-# Disallow -D_OLD_EXCEPTIONS without other options
-MULTILIB_EXCEPTIONS += D_OLD_EXCEPTIONS*
-
# Disallow -Dppc and -Dmpc without other options
MULTILIB_EXCEPTIONS += Dppc* Dmpc*
MULTILIB_EXCEPTIONS += \
-${MULTILIB_NEW_EXCEPTIONS_ONLY} \
${MULTILIB_SOFTFLOAT_ONLY} \
${MULTILIB_HARDFLOAT_ONLY}
# Special rules
# Take out all variants we don't want
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603e/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc8260*
+MULTILIB_EXCEPTIONS += *mcpu=403/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=505/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=601/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=604/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=750/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=860/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=7400/Dmpc*
diff --git a/contrib/gcc/config/rs6000/t-vxworks b/contrib/gcc/config/rs6000/t-vxworks
index e89e47b..fe65a39 100644
--- a/contrib/gcc/config/rs6000/t-vxworks
+++ b/contrib/gcc/config/rs6000/t-vxworks
@@ -1,10 +1,11 @@
# Multilibs for VxWorks.
-MULTILIB_OPTIONS = t403/t405/t440/t603/t604/t860
-MULTILIB_DIRNAMES = PPC403gnu PPC405gnu PPC440gnu \
- PPC603gnu PPC604gnu PPC860gnu
+# The base multilib is -mhard-float.
+MULTILIB_OPTIONS = mrtp fPIC msoft-float
+MULTILIB_DIRNAMES =
+MULTILIB_MATCHES = fPIC=fpic
+MULTILIB_EXCEPTIONS = fPIC*
-MULTILIB_MATCHES = t604=
-
-# Put vxlib.c back in LIB2FUNCS_EXTRA (t-ppccomm clobbers it).
-LIB2FUNCS_EXTRA += $(srcdir)/config/vxlib.c
+# Restore some variables from t-vxworks clobbered by t-ppccomm.
+EXTRA_MULTILIB_PARTS =
+LIB2FUNCS_EXTRA = $(srcdir)/config/vxlib.c
diff --git a/contrib/gcc/config/rs6000/t-vxworksae b/contrib/gcc/config/rs6000/t-vxworksae
new file mode 100644
index 0000000..5f68262
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-vxworksae
@@ -0,0 +1,5 @@
+# Multilibs for VxWorks AE.
+
+MULTILIB_OPTIONS = mvthreads msoft-float
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS =
diff --git a/contrib/gcc/config/rs6000/tramp.asm b/contrib/gcc/config/rs6000/tramp.asm
index 284f938..63dacc0 100644
--- a/contrib/gcc/config/rs6000/tramp.asm
+++ b/contrib/gcc/config/rs6000/tramp.asm
@@ -23,8 +23,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
*
* As a special exception, if you link this library with files
* compiled with GCC to produce an executable, this does not cause
@@ -44,7 +44,7 @@
.align 2
trampoline_initial:
mflr r0
- bl 1f
+ bcl 20,31,1f
.Lfunc = .-trampoline_initial
.long 0 /* will be replaced with function address */
.Lchain = .-trampoline_initial
@@ -67,7 +67,7 @@ trampoline_size = .-trampoline_initial
FUNC_START(__trampoline_setup)
mflr r0 /* save return address */
- bl .LCF0 /* load up __trampoline_initial into r7 */
+ bcl 20,31,.LCF0 /* load up __trampoline_initial into r7 */
.LCF0:
mflr r11
addi r7,r11,trampoline_initial-4-.LCF0 /* trampoline address -4 */
@@ -105,6 +105,12 @@ FUNC_START(__trampoline_setup)
blr
.Labort:
+#if defined SHARED && defined HAVE_AS_REL16
+ bcl 20,31,1f
+1: mflr r30
+ addis r30,r30,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r30,r30,_GLOBAL_OFFSET_TABLE_-1b@l
+#endif
bl JUMP_TARGET(abort)
FUNC_END(__trampoline_setup)
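[The bl -> bcl 20,31 changes above all touch the "branch to the next label to
capture the PC" idiom. bcl 20,31,label is the architected branch-always-with-
link encoding; unlike a plain bl, PowerPC cores recognize it and do not push a
return address onto the link-register prediction stack, so this never-returned-
to "call" stops unbalancing return prediction. A minimal GNU C sketch of the
same idiom (the function name is mine):]

/* Sketch: fetch the current instruction address the way the hunks
   above do, without disturbing the return-address predictor.  */
static void *
pic_base (void)
{
  void *pc;
  __asm__ volatile ("bcl 20,31,0f\n"
                    "0:\tmflr %0"        /* LR now holds &0:  */
                    : "=r" (pc)
                    :
                    : "lr");
  return pc;
}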
diff --git a/contrib/gcc/config/rs6000/vxworks.h b/contrib/gcc/config/rs6000/vxworks.h
index e7a7092..273a435 100644
--- a/contrib/gcc/config/rs6000/vxworks.h
+++ b/contrib/gcc/config/rs6000/vxworks.h
@@ -1,32 +1,56 @@
/* Definitions of target machine for GNU compiler. Vxworks PowerPC version.
- Copyright (C) 1996, 2000, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1996, 2000, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
- This file is part of GCC.
+This file is part of GCC.
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
- option) any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
-#undef TARGET_OS_CPP_BUILTINS
+/* Note to future editors: VxWorks is mostly an EABI target. We do
+ not use rs6000/eabi.h because we would have to override most of
+ it anyway. However, if you change that file, consider making
+ analogous changes here too. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC VxWorks)");
+
+/* CPP predefined macros. */
+
+#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
+ builtin_define ("__ppc"); \
+ builtin_define ("__EABI__"); \
+ builtin_define ("__ELF__"); \
builtin_define ("__vxworks"); \
- builtin_define ("__vxworks__"); \
+ builtin_define ("__VXWORKS__"); \
+ if (!TARGET_SOFT_FLOAT) \
+ builtin_define ("__hardfp"); \
+ \
+ /* C89 namespace violation! */ \
+ builtin_define ("CPU_FAMILY=PPC"); \
} \
while (0)
+/* Only big endian PPC is supported by VxWorks. */
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN 1
+
/* We have to kill off the entire specs set created by rs6000/sysv4.h
and substitute our own set. The top level vxworks.h has done some
of this for us. */
@@ -38,45 +62,92 @@
#define SUBTARGET_EXTRA_SPECS /* none needed */
+/* FIXME: The only reason we allow no -mcpu switch at all is because
+ config-ml.in insists on a "." multilib. */
#define CPP_SPEC \
-"-DCPU_FAMILY=PPC -D__ppc -D__EABI__ \
- %{t403: -DCPU=PPC403 -D_SOFT_FLOAT ; \
- t405: -DCPU=PPC405 -D_SOFT_FLOAT ; \
- t440: -DCPU=PPC440 -D_SOFT_FLOAT ; \
- t603: -DCPU=PPC603 ; \
- t604: -DCPU=PPC604 ; \
- t860: -DCPU=PPC860 -D_SOFT_FLOAT ; \
- : -DCPU=PPC604} \
- %{!msoft-float:-D__hardfp} \
- %{fpic|fpie: -D__PIC__=1 -D__pic__=1 ; \
- fPIC|fPIE: -D__PIC__=2 -D__pic__=2 } \
- %(cpp_cpu)"
-
-#define CC1_SPEC \
-"%{t403: -mcpu=403 -mstrict-align ; \
- t405: -mcpu=405 -mstrict-align ; \
- t440: -mcpu=440 -mstrict-align ; \
- t603: -mcpu=603 -mstrict-align ; \
- t604: -mcpu=604 -mstrict-align ; \
- t860: -mcpu=860 ; \
- : -mcpu=604 -mstrict-align } \
- %{G*} %{mno-sdata:-msdata=none} %{msdata:-msdata=default} \
+"%{!DCPU=*: \
+ %{mcpu=403 : -DCPU=PPC403 ; \
+ mcpu=405 : -DCPU=PPC405 ; \
+ mcpu=440 : -DCPU=PPC440 ; \
+ mcpu=603 : -DCPU=PPC603 ; \
+ mcpu=604 : -DCPU=PPC604 ; \
+ mcpu=860 : -DCPU=PPC860 ; \
+ mcpu=8540: -DCPU=PPC85XX ; \
+ : -DCPU=PPC604 }}" \
+VXWORKS_ADDITIONAL_CPP_SPEC
+
+#define CC1_SPEC \
+"%{G*} %{mno-sdata:-msdata=none} %{msdata:-msdata=default} \
%{mlittle|mlittle-endian:-mstrict-align} \
- %{profile: -p} \
+ %{profile: -p} \
%{fvec:-maltivec} %{fvec-eabi:-maltivec -mabi=altivec}"
-
-#define ASM_SPEC "%(asm_cpu) \
-%{.s: %{mregnames} %{mno-regnames}} %{.S: %{mregnames} %{mno-regnames}} \
-%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
-%{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} -mbig"
-
-#undef MULTILIB_DEFAULTS
-#define MULTILIB_DEFAULTS { "t604" }
-
-/* We can't use .ctors/.dtors sections. */
-#undef TARGET_ASM_OUTPUT_CONSTRUCTOR
-#undef TARGET_ASM_OUTPUT_DESTRUCTOR
-
-/* Nor sdata. */
-#undef SDATA_DEFAULT_SIZE
-#define SDATA_DEFAULT_SIZE 0
+
+#define ASM_SPEC \
+"%(asm_cpu) \
+ %{.s: %{mregnames} %{mno-regnames}} %{.S: %{mregnames} %{mno-regnames}} \
+ %{v:-v} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+ %{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} -mbig"
+
+#undef LIB_SPEC
+#define LIB_SPEC VXWORKS_LIB_SPEC
+#undef LINK_SPEC
+#define LINK_SPEC VXWORKS_LINK_SPEC
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC VXWORKS_STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC VXWORKS_ENDFILE_SPEC
+
+/* There is no default multilib. */
+#undef MULTILIB_DEFAULTS
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI | MASK_STRICT_ALIGN)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604
+
+/* Nor sdata, for kernel mode.  We use this in
+   SUBSUBTARGET_OVERRIDE_OPTIONS, below, after rs6000_rtp has been initialized.  */
+#undef SDATA_DEFAULT_SIZE
+#define SDATA_DEFAULT_SIZE (TARGET_VXWORKS_RTP ? 8 : 0)
+
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY (16*BITS_PER_UNIT)
+/* Override sysv4.h, reset to the default. */
+#undef PREFERRED_STACK_BOUNDARY
+
+/* Enable SPE */
+#undef TARGET_SPE_ABI
+#undef TARGET_SPE
+#undef TARGET_E500
+#undef TARGET_ISEL
+#undef TARGET_FPRS
+
+#define TARGET_SPE_ABI rs6000_spe_abi
+#define TARGET_SPE rs6000_spe
+#define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540)
+#define TARGET_ISEL rs6000_isel
+#define TARGET_FPRS (!rs6000_float_gprs)
+
+/* Make -mcpu=8540 imply SPE. ISEL is automatically enabled, the
+ others must be done by hand. Handle -mrtp. Disable -fPIC
+ for -mrtp - the VxWorks PIC model is not compatible with it. */
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ if (TARGET_E500) \
+ { \
+ rs6000_spe = 1; \
+ rs6000_spe_abi = 1; \
+ rs6000_float_gprs = 1; \
+ } \
+ \
+ if (!g_switch_set) \
+ g_switch_value = SDATA_DEFAULT_SIZE; \
+ VXWORKS_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* No _mcount profiling on VxWorks. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE,LABELNO) VXWORKS_FUNCTION_PROFILER(FILE,LABELNO)
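[A hypothetical smoke test of the preprocessor environment the rewritten
TARGET_OS_CPP_BUILTINS above sets up; on powerpc-wrs-vxworks with hardware
floating point this should compile cleanly.]

#if !defined (__vxworks) || !defined (__VXWORKS__)
#error "both spellings of the VxWorks predefine should be present"
#endif
#if !defined (__ppc) || !defined (__EABI__) || !defined (__ELF__)
#error "EABI/ELF PowerPC predefines missing"
#endif
#ifdef __hardfp
const char *fp_model = "hardware";   /* absent under -msoft-float */
#else
const char *fp_model = "soft";
#endif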
diff --git a/contrib/gcc/config/rs6000/vxworksae.h b/contrib/gcc/config/rs6000/vxworksae.h
new file mode 100644
index 0000000..814b969
--- /dev/null
+++ b/contrib/gcc/config/rs6000/vxworksae.h
@@ -0,0 +1,24 @@
+/* PowerPC VxWorks AE target definitions for GNU compiler.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC VxWorks AE)");
+
diff --git a/contrib/gcc/config/rs6000/windiss.h b/contrib/gcc/config/rs6000/windiss.h
index e66d128..f4f1665 100644
--- a/contrib/gcc/config/rs6000/windiss.h
+++ b/contrib/gcc/config/rs6000/windiss.h
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (PowerPC WindISS)");
diff --git a/contrib/gcc/config/rs6000/x-darwin b/contrib/gcc/config/rs6000/x-darwin
index f7242a7..033ab6b 100644
--- a/contrib/gcc/config/rs6000/x-darwin
+++ b/contrib/gcc/config/rs6000/x-darwin
@@ -1,4 +1,4 @@
-host-darwin.o : $(srcdir)/config/rs6000/host-darwin.c $(CONFIG_H) $(SYSTEM_H) \
- coretypes.h hosthooks.h hosthooks-def.h toplev.h diagnostic.h
- $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
- $(srcdir)/config/rs6000/host-darwin.c
+host-ppc-darwin.o : $(srcdir)/config/rs6000/host-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/contrib/gcc/config/rs6000/x-darwin64 b/contrib/gcc/config/rs6000/x-darwin64
new file mode 100644
index 0000000..3cb423d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/x-darwin64
@@ -0,0 +1,4 @@
+host-ppc64-darwin.o : $(srcdir)/config/rs6000/host-ppc64-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/contrib/gcc/config/rs6000/xcoff.h b/contrib/gcc/config/rs6000/xcoff.h
index d4e056b..ebf79b5 100644
--- a/contrib/gcc/config/rs6000/xcoff.h
+++ b/contrib/gcc/config/rs6000/xcoff.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for some generic XCOFF file format
- Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of GCC.
@@ -16,8 +16,8 @@
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
#define TARGET_OBJECT_FORMAT OBJECT_XCOFF
@@ -56,92 +56,10 @@
#define DOLLARS_IN_IDENTIFIERS 0
-/* Define the extra sections we need. We define three: one is the read-only
- data section which is used for constants. This is a csect whose name is
- derived from the name of the input file. The second is for initialized
- global variables. This is a csect whose name is that of the variable.
- The third is the TOC. */
-
-#define EXTRA_SECTIONS \
- read_only_data, private_data, read_only_private_data, toc, bss
-
-/* Define the routines to implement these extra sections.
- BIGGEST_ALIGNMENT is 64, so align the sections that much. */
-
-#define EXTRA_SECTION_FUNCTIONS \
- READ_ONLY_DATA_SECTION_FUNCTION \
- PRIVATE_DATA_SECTION_FUNCTION \
- READ_ONLY_PRIVATE_DATA_SECTION_FUNCTION \
- TOC_SECTION_FUNCTION
-
-#define READ_ONLY_DATA_SECTION_FUNCTION \
-void \
-read_only_data_section (void) \
-{ \
- if (in_section != read_only_data) \
- { \
- fprintf (asm_out_file, "\t.csect %s[RO],3\n", \
- xcoff_read_only_section_name); \
- in_section = read_only_data; \
- } \
-}
+/* The AIX .align pseudo-op accepts values from 0 to 12, giving the
+   log base 2 of the alignment in bytes; 12 = 4096 bytes = 32768 bits.  */
-#define PRIVATE_DATA_SECTION_FUNCTION \
-void \
-private_data_section (void) \
-{ \
- if (in_section != private_data) \
- { \
- fprintf (asm_out_file, "\t.csect %s[RW],3\n", \
- xcoff_private_data_section_name); \
- in_section = private_data; \
- } \
-}
-
-#define READ_ONLY_PRIVATE_DATA_SECTION_FUNCTION \
-void \
-read_only_private_data_section (void) \
-{ \
- if (in_section != read_only_private_data) \
- { \
- fprintf (asm_out_file, "\t.csect %s[RO],3\n", \
- xcoff_private_data_section_name); \
- in_section = read_only_private_data; \
- } \
-}
-
-#define TOC_SECTION_FUNCTION \
-void \
-toc_section (void) \
-{ \
- if (TARGET_MINIMAL_TOC) \
- { \
- /* toc_section is always called at least once \
- from rs6000_xcoff_file_start, so this is \
- guaranteed to always be defined once and \
- only once in each file. */ \
- if (! toc_initialized) \
- { \
- fputs ("\t.toc\nLCTOC..1:\n", asm_out_file); \
- fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file); \
- toc_initialized = 1; \
- } \
- \
- if (in_section != toc) \
- fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n", \
- (TARGET_32BIT ? "" : ",3")); \
- } \
- else \
- { \
- if (in_section != toc) \
- fputs ("\t.toc\n", asm_out_file); \
- } \
- in_section = toc; \
-}
-
-/* Define the name of our readonly data section. */
-
-#define READONLY_DATA_SECTION read_only_data_section
+#define MAX_OFILE_ALIGNMENT 32768
/* Return nonzero if this entry is to be written into the constant
pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
@@ -164,14 +82,18 @@ toc_section (void) \
|| (GET_CODE (X) == CONST_DOUBLE \
&& (TARGET_POWERPC64 \
|| TARGET_MINIMAL_TOC \
- || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& ! TARGET_NO_FP_IN_TOC)))))
+#define TARGET_ASM_OUTPUT_ANCHOR rs6000_xcoff_asm_output_anchor
#define TARGET_ASM_GLOBALIZE_LABEL rs6000_xcoff_asm_globalize_label
+#define TARGET_ASM_INIT_SECTIONS rs6000_xcoff_asm_init_sections
+#define TARGET_ASM_RELOC_RW_MASK rs6000_xcoff_reloc_rw_mask
#define TARGET_ASM_NAMED_SECTION rs6000_xcoff_asm_named_section
#define TARGET_ASM_SELECT_SECTION rs6000_xcoff_select_section
#define TARGET_ASM_SELECT_RTX_SECTION rs6000_xcoff_select_rtx_section
#define TARGET_ASM_UNIQUE_SECTION rs6000_xcoff_unique_section
+#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#define TARGET_STRIP_NAME_ENCODING rs6000_xcoff_strip_name_encoding
#define TARGET_SECTION_TYPE_FLAGS rs6000_xcoff_section_type_flags
@@ -212,9 +134,8 @@ toc_section (void) \
On the RS/6000, we need to place an extra '.' in the function name and
output the function descriptor.
- The csect for the function will have already been created by the
- `text_section' call previously done. We do have to go back to that
- csect, however.
+ The csect for the function will have already been created when
+ text_section was selected. We do have to go back to that csect, however.
The third and fourth parameters to the .function pseudo-op (16 and 044)
are placeholders which no longer have any use. */
@@ -243,8 +164,8 @@ toc_section (void) \
fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", FILE); \
RS6000_OUTPUT_BASENAME (FILE, NAME); \
fputs (", TOC[tc0], 0\n", FILE); \
- in_section = no_section; \
- function_section(DECL); \
+ in_section = NULL; \
+ switch_to_section (function_section (DECL)); \
putc ('.', FILE); \
RS6000_OUTPUT_BASENAME (FILE, NAME); \
fputs (":\n", FILE); \
@@ -361,10 +282,6 @@ toc_section (void) \
Align entire section to BIGGEST_ALIGNMENT. */
#define DATA_SECTION_ASM_OP "\t.csect .data[RW],3"
-/* Define the name of the section to use for the EH language specific
- data areas (.gcc_except_table on most other systems). */
-#define TARGET_ASM_EXCEPTION_SECTION data_section
-
/* Define to prevent DWARF2 unwind info in the data section rather
than in the .eh_frame section. We do this because the AIX linker
would otherwise garbage collect these sections. */
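[Checking the arithmetic behind the MAX_OFILE_ALIGNMENT hunk earlier in this
file: .align takes log-base-2 values up to 12, i.e. 2^12 = 4096-byte
alignment, and GCC expresses the limit in bits. A standalone C11 assertion of
that conversion (enum names are mine):]

/* 2**12 bytes, converted to the bit units MAX_OFILE_ALIGNMENT uses.  */
enum { AIX_ALIGN_LOG2_MAX = 12, BITS_PER_BYTE = 8 };
_Static_assert ((1 << AIX_ALIGN_LOG2_MAX) * BITS_PER_BYTE == 32768,
                "4096 bytes == 32768 bits");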