[mingw-gcc: 6/36] - New native Fedora version gcc 4.4.0 20090319 svn 144967. - Enable _smp_mflags.

epienbro epienbro at fedoraproject.org
Tue Mar 6 19:18:52 UTC 2012


commit c007570a2c83eea7a1f65d8e3afd420b33a7123a
Author: Richard W.M. Jones <rjones at fedoraproject.org>
Date:   Mon Mar 23 10:56:41 2009 +0000

    - New native Fedora version gcc 4.4.0 20090319 svn 144967.
    - Enable _smp_mflags.

 .cvsignore                     |    4 +-
 gcc44-atom.patch               | 2832 ++++++++++++
 gcc44-c++-builtin-redecl.patch |    6 +-
 gcc44-cloog-dl.patch           |   46 +-
 gcc44-diff.patch               |  353 --
 gcc44-memmove-opt.patch        |  128 +
 gcc44-power7-2.patch           | 1369 ++++++
 gcc44-power7.patch             | 9513 ++++++++++++++++++++++++++++++++++++++++
 gcc44-pr37959.patch            |  146 +
 gcc44-pr38757.patch            |  129 +
 gcc44-pr39175.patch            |  133 -
 gcc44-pr39226.patch            |  108 +
 gcc44-raw-string.patch         |   15 +-
 mingw32-gcc.spec               |   31 +-
 sources                        |    4 +-
 15 files changed, 14264 insertions(+), 553 deletions(-)
---
diff --git a/.cvsignore b/.cvsignore
index 3ffde1f..12497ff 100644
--- a/.cvsignore
+++ b/.cvsignore
@@ -1,3 +1 @@
-fastjar-0.97.tar.gz
-gcc-4.4.0-20090216.tar.bz2
-cloog-ppl-0.15.tar.gz
+gcc-4.4.0-20090319.tar.bz2
diff --git a/gcc44-atom.patch b/gcc44-atom.patch
new file mode 100644
index 0000000..e122e46
--- /dev/null
+++ b/gcc44-atom.patch
@@ -0,0 +1,2832 @@
+2009-02-05  Joey Ye  <joey.ye at intel.com>
+	    Xuepeng Guo <xuepeng.guo at intel.com>
+	    H.J. Lu  <hongjiu.lu at intel.com>
+
+	Atom pipeline model, tuning and insn selection.
+	* rtlanal.c (reg_mentioned_by_mem_p_1): New function.
+	(reg_mentioned_by_mem_p): New function.
+	(reg_dep_by_addr_p): New function.
+
+	* rtl.h (reg_mentioned_by_mem_p): Declare new function.
+	(reg_dep_by_addr_p): Likewise.
+
+	* config.gcc (atom): Add atom config options and target.
+
+	* config/i386/i386.h (TARGET_ATOM): New target macro.
+	(X86_TUNE_OPT_AGU): New tuning flag.
+	(TARGET_OPT_AGU): New target option.
+	(TARGET_CPU_DEFAULT_atom): New CPU default.
+	(PROCESSOR_ATOM): New processor.
+
+	* config/i386/i386-c.c (ix86_target_macros_internal): New case
+	PROCESSOR_ATOM.
+	(ix86_target_macros_internal): Likewise.
+
+	* config/i386/i386-protos.h (ix86_lea_for_add_ok): Declare new
+	function.
+	(ix86_dep_by_shift_count): Likewise.
+	(ix86_agi_dependent): Likewise.
+
+	* config/i386/i386.c (atom_cost): New cost.
+	(m_ATOM): New macro flag.
+	(initial_ix86_tune_features): Set m_ATOM.
+	(x86_accumulate_outgoing_args): Likewise.
+	(x86_arch_always_fancy_math_387): Likewise.
+	(processor_target_table): Add Atom cost.
+	(cpu_names): Add Atom cpu name.
+	(override_options): Set Atom ISA.
+	(LEA_SEARCH_THRESHOLD): New macro.
+	(distance_non_agu_define): New function.
+	(distance_agu_use): Likewise.
+	(ix86_lea_for_add_ok): Likewise.
+	(ix86_dep_by_shift_count): Likewise.
+	(ix86_agi_dependent): Make it global.
+	(ix86_issue_rate): New case PROCESSOR_ATOM.
+	(ix86_adjust_cost): Likewise.
+
+	* config/i386/i386.md (cpu): Add new value "atom".
+	(atom.md): Include atom.md.
+	(use_carry, movu): New attr.
+	(adddi3_carry_rex64): Set attr "use_carry".
+	(addqi3_carry): Likewise.
+	(addhi3_carry): Likewise.
+	(addsi3_carry): Likewise.
+	(*addsi3_carry_zext): Likewise.
+	(subdi3_carry_rex64): Likewise.
+	(subqi3_carry): Likewise.
+	(subhi3_carry): Likewise.
+	(subsi3_carry): Likewise.
+	(x86_movdicc_0_m1_rex64): Likewise.
+	(*x86_movdicc_0_m1_se): Likewise.
+	(x86_movsicc_0_m1): Likewise.
+	(*x86_movsicc_0_m1_se): Likewise.
+	(*adddi_1_rex64): Emit add insn as much as possible.
+	(*addsi_1): Likewise.
+	(return_internal): Set atom_unit.
+	(return_internal_long): Likewise.
+	(return_pop_internal): Likewise.
+	(*rcpsf2_sse): Set atom_sse_attr attr.
+	(*sqrt<mode>2_sse): Likewise.
+	(*prefetch_sse): Likewise.
+
+	* config/i386/sse.md (cpu): Set attr "atom_sse_attr".
+	(*prefetch_sse_rex): Likewise.
+	(sse_rcpv4sf2): Likewise.
+	(sse_vmrcpv4sf2): Likewise.
+	(sse_sqrtv4sf2): Likewise.
+	(<sse>_vmsqrt<mode>2): Likewise.
+	(sse_ldmxcsr): Likewise.
+	(sse_stmxcsr): Likewise.
+	(*sse_sfence): Likewise.
+	(sse2_clflush): Likewise.
+	(*sse2_mfence): Likewise.
+	(*sse2_lfence): Likewise.
+	(avx_movup<avxmodesuffixf2c><avxmodesuffix>): Set attr "movu".
+	(<sse>_movup<ssemodesuffixf2c>): Likewise.
+	(avx_movdqu<avxmodesuffix>): Likewise.
+	(avx_lddqu<avxmodesuffix>): Likewise.
+	(sse2_movntv2di): Change attr "type" to "ssemov".
+	(sse2_movntsi): Likewise.
+	(rsqrtv8sf2): Change attr "type" to "sseadd".
+	(sse3_addsubv2df3): Set attr "atom_unit".
+	(sse3_h<plusminus_insn>v4sf3): Likewise.
+	(*sse2_pmaddwd): Likewise.
+	(*vec_extractv2di_1_rex64): Likewise.
+	(*vec_extractv2di_1_avx): Likewise.
+	(sse2_psadbw): Likewise.
+	(ssse3_phaddwv8hi3): Likewise.
+	(ssse3_phaddwv4hi3): Likewise.
+	(ssse3_phadddv4si3): Likewise.
+	(ssse3_phadddv2si3): Likewise.
+	(ssse3_phaddswv8hi3): Likewise.
+	(ssse3_phaddswv4hi3): Likewise.
+	(ssse3_phsubwv8hi3): Likewise.
+	(ssse3_phsubwv4hi3): Likewise.
+	(ssse3_phsubdv4si3): Likewise.
+	(ssse3_phsubdv2si3): Likewise.
+	(ssse3_phsubswv8hi3): Likewise.
+	(ssse3_phsubswv4hi3): Likewise.
+	(ssse3_pmaddubsw128): Likewise.
+	(ssse3_pmaddubsw): Likewise.
+	(ssse3_palignrti): Likewise.
+	(ssse3_palignrdi): Likewise.
+
+	* config/i386/atom.md: New.
+
+2009-02-05  H.J. Lu  <hongjiu.lu at intel.com>
+
+	* config/i386/i386.c (ix86_agi_dependent): Remove the third
+	argument.  Swap the first 2 arguments.
+	(ix86_adjust_cost): Updated.
+
+2009-01-30  Vladimir Makarov  <vmakarov at redhat.com>
+
+	* genautomata.c: Add a new year to the copyright.  Add a new
+	reference.
+	(struct insn_reserv_decl): Add comments for member bypass_list.
+	(find_bypass): Remove.
+	(insert_bypass): New.
+	(process_decls): Use insert_bypass.
+	(output_internal_insn_latency_func): Output all bypasses with the
+	same input insn in one switch case.
+
+	* rtl.def (define_bypass): Describe bypass choice.
+	* doc/md.texi (define_bypass): Ditto.
+
+--- gcc/doc/md.texi	(.../trunk)	(revision 144460)
++++ gcc/doc/md.texi	(.../branches/ix86/atom)	(revision 144601)
+@@ -7506,6 +7506,11 @@ be ignored for this case.  The additiona
+ recognize complicated bypasses, e.g.@: when the consumer is only an address
+ of insn @samp{store} (not a stored value).
+ 
++If there is more than one bypass with the same output and input insns,
++the chosen bypass is the first bypass in the description with a guard
++whose guard function returns nonzero.  If there is no such bypass, then
++the bypass without a guard function is chosen.
++
+ @findex exclusion_set
+ @findex presence_set
+ @findex final_presence_set
+--- gcc/rtlanal.c	(.../trunk)	(revision 144460)
++++ gcc/rtlanal.c	(.../branches/ix86/atom)	(revision 144601)
+@@ -728,6 +728,129 @@ reg_mentioned_p (const_rtx reg, const_rt
+     }
+   return 0;
+ }
++
++static int
++reg_mentioned_by_mem_p_1 (const_rtx reg, const_rtx in,
++			  bool *mem_p)
++{
++  const char *fmt;
++  int i;
++  enum rtx_code code;
++
++  if (in == 0)
++    return 0;
++
++  if (reg == in)
++    return 1;
++
++  if (GET_CODE (in) == LABEL_REF)
++    return reg == XEXP (in, 0);
++
++  code = GET_CODE (in);
++
++  switch (code)
++    {
++      /* Compare registers by number.  */
++    case REG:
++      return REG_P (reg) && REGNO (in) == REGNO (reg);
++
++      /* These codes have no constituent expressions
++	 and are unique.  */
++    case SCRATCH:
++    case CC0:
++    case PC:
++      return 0;
++
++    case CONST_INT:
++    case CONST_VECTOR:
++    case CONST_DOUBLE:
++    case CONST_FIXED:
++      /* These are kept unique for a given value.  */
++      return 0;
++
++    default:
++      break;
++    }
++
++  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
++    return 1;
++
++  fmt = GET_RTX_FORMAT (code);
++
++  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
++    {
++      if (fmt[i] == 'E')
++	{
++	  int j;
++	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
++	    if (reg_mentioned_by_mem_p_1 (reg, XVECEXP (in, i, j), mem_p))
++              {
++                if (code == MEM)
++                  *mem_p = true;
++
++	        return 1;
++              }
++	}
++      else if (fmt[i] == 'e'
++	       && reg_mentioned_by_mem_p_1 (reg, XEXP (in, i), mem_p))
++	{
++	  if (code == MEM)
++	    *mem_p = true;
++
++	  return 1;
++	}
++    }
++  return 0;
++}
++
++/* Similar to the function reg_mentioned_p, return true only when
++   register REG appears in a MEM container of RTX IN.  */
++
++bool
++reg_mentioned_by_mem_p (const_rtx reg, const_rtx in)
++{
++  bool mem = false;
++
++  reg_mentioned_by_mem_p_1 (reg, in, &mem);
++  return mem;
++}
++
++/* Return true if the destination register of SET_INSN is used in
++   USE_INSN for address calculation.
++   For example, returns true if
++     set_insn: reg_a = reg_b
++     use_insn: reg_c = (reg_a) # reg_a used in addr calculation
++   False if
++     set_insn: reg_a = reg_b
++     use_insn: (reg_c) = reg_a # reg_a is used, but not as an addr.  */
++
++bool
++reg_dep_by_addr_p (const_rtx set_insn, const_rtx use_insn)
++{
++  rtx pattern = PATTERN (set_insn);
++  rtx set_dest = NULL;
++
++  switch (GET_CODE (pattern))
++    {
++      case SET:
++        set_dest = SET_DEST (pattern);
++        break;
++      case PARALLEL:
++        {
++          rtx pattern2 = XVECEXP (PATTERN (set_insn), 0,0);
++  	  if (GET_CODE (pattern2) == SET)
++  	    set_dest = SET_DEST (pattern2);
++          break;
++        }
++      default:
++        set_dest = NULL;
++    }
++
++  /* True if destination of set is reg and used as address.  */
++  return set_dest && REG_P (set_dest) 
++         && reg_mentioned_by_mem_p (set_dest, use_insn);
++}
++
+ 
+ /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
+    no CODE_LABEL insn.  */
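The two predicates added above let a scheduler distinguish a plain data
dependence from an address-generation dependence.  As a minimal sketch
(not part of the patch; the hook name and stall value are hypothetical,
and per the ChangeLog the real consumer is ix86_adjust_cost), a target
cost hook could use them like this:

    /* Hypothetical sketch: DEP_INSN produces a value that INSN consumes;
       charge extra latency only when that value feeds an address.  */
    #include "config.h"
    #include "system.h"
    #include "coretypes.h"
    #include "tm.h"
    #include "rtl.h"

    static int
    sketch_adjust_cost (rtx insn, rtx dep_insn, int cost)
    {
      /* reg_dep_by_addr_p (set_insn, use_insn) is true iff the register
	 set by SET_INSN appears inside a MEM address in USE_INSN.  */
      if (reg_dep_by_addr_p (dep_insn, insn))
	return cost + 3;	/* hypothetical AGU stall penalty */
      return cost;
    }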
+--- gcc/genautomata.c	(.../trunk)	(revision 144460)
++++ gcc/genautomata.c	(.../branches/ix86/atom)	(revision 144601)
+@@ -22,21 +22,25 @@ along with GCC; see the file COPYING3.  
+ 
+ /* References:
+ 
+-   1. Detecting pipeline structural hazards quickly. T. Proebsting,
++   1. The finite state automaton based pipeline hazard recognizer and
++      instruction scheduler in GCC.  V. Makarov.  Proceedings of GCC
++      summit, 2003.
++
++   2. Detecting pipeline structural hazards quickly. T. Proebsting,
+       C. Fraser. Proceedings of ACM SIGPLAN-SIGACT Symposium on
+       Principles of Programming Languages, pages 280--286, 1994.
+ 
+       This article is a good start point to understand usage of finite
+       state automata for pipeline hazard recognizers.  But I'd
+-      recommend the 2nd article for more deep understanding.
++      recommend the 1st and 3rd article for more deep understanding.
+ 
+-   2. Efficient Instruction Scheduling Using Finite State Automata:
++   3. Efficient Instruction Scheduling Using Finite State Automata:
+       V. Bala and N. Rubin, Proceedings of MICRO-28.  This is the best
+       article about usage of finite state automata for pipeline hazard
+       recognizers.
+ 
+-   The current implementation is different from the 2nd article in the
+-   following:
++   The current implementation is described in the 1st article and it
++   is different from the 3rd article in the following:
+ 
+    1. New operator `|' (alternative) is permitted in functional unit
+       reservation which can be treated deterministically and
+@@ -463,7 +467,10 @@ struct insn_reserv_decl
+      insn.  */
+   int insn_num;
+   /* The following field value is list of bypasses in which given insn
+-     is output insn.  */
++     is the output insn.  Bypasses with the same input insn stay one
++     after another in the list, in the same order as their occurrences
++     in the description, but the bypass without a guard always stays
++     last in a row of bypasses with the same input insn.  */
+   struct bypass_decl *bypass_list;
+ 
+   /* The following fields are defined by automaton generator.  */
+@@ -2367,18 +2374,67 @@ add_presence_absence (unit_set_el_t dest
+ }
+ 
+ 
+-/* The function searches for bypass with given IN_INSN_RESERV in given
+-   BYPASS_LIST.  */
+-static struct bypass_decl *
+-find_bypass (struct bypass_decl *bypass_list,
+-	     struct insn_reserv_decl *in_insn_reserv)
+-{
+-  struct bypass_decl *bypass;
+-
+-  for (bypass = bypass_list; bypass != NULL; bypass = bypass->next)
+-    if (bypass->in_insn_reserv == in_insn_reserv)
+-      break;
+-  return bypass;
++/* The function inserts BYPASS in the list of bypasses of the
++   corresponding output insn.  The order of bypasses in the list is
++   described in a comment for member `bypass_list' (see above).  If
++   the same bypass is already in the list, the function reports this
++   and does nothing.  */
++static void
++insert_bypass (struct bypass_decl *bypass)
++{
++  struct bypass_decl *curr, *last;
++  struct insn_reserv_decl *out_insn_reserv = bypass->out_insn_reserv;
++  struct insn_reserv_decl *in_insn_reserv = bypass->in_insn_reserv;
++  
++  for (curr = out_insn_reserv->bypass_list, last = NULL;
++       curr != NULL;
++       last = curr, curr = curr->next)
++    if (curr->in_insn_reserv == in_insn_reserv)
++      {
++	if ((bypass->bypass_guard_name != NULL
++	     && curr->bypass_guard_name != NULL
++	     && ! strcmp (bypass->bypass_guard_name, curr->bypass_guard_name))
++	    || bypass->bypass_guard_name == curr->bypass_guard_name)
++	  {
++	    if (bypass->bypass_guard_name == NULL)
++	      {
++		if (!w_flag)
++		  error ("the same bypass `%s - %s' is already defined",
++			 bypass->out_insn_name, bypass->in_insn_name);
++		else
++		  warning (0, "the same bypass `%s - %s' is already defined",
++			   bypass->out_insn_name, bypass->in_insn_name);
++	      }
++	    else if (!w_flag)
++	      error ("the same bypass `%s - %s' (guard %s) is already defined",
++		     bypass->out_insn_name, bypass->in_insn_name,
++		     bypass->bypass_guard_name);
++	    else
++	      warning
++		(0, "the same bypass `%s - %s' (guard %s) is already defined",
++		 bypass->out_insn_name, bypass->in_insn_name,
++		 bypass->bypass_guard_name);
++	    return;
++	  }
++	if (curr->bypass_guard_name == NULL)
++	  break;
++	if (curr->next == NULL || curr->next->in_insn_reserv != in_insn_reserv)
++	  {
++	    last = curr;
++	    break;
++	  }
++	  
++      }
++  if (last == NULL)
++    {
++      bypass->next = out_insn_reserv->bypass_list;
++      out_insn_reserv->bypass_list = bypass;
++    }
++  else
++    {
++      bypass->next = last->next;
++      last->next = bypass;
++    }
+ }
+ 
+ /* The function processes pipeline description declarations, checks
+@@ -2391,7 +2447,6 @@ process_decls (void)
+   decl_t decl_in_table;
+   decl_t out_insn_reserv;
+   decl_t in_insn_reserv;
+-  struct bypass_decl *bypass;
+   int automaton_presence;
+   int i;
+ 
+@@ -2514,36 +2569,7 @@ process_decls (void)
+ 		= DECL_INSN_RESERV (out_insn_reserv);
+ 	      DECL_BYPASS (decl)->in_insn_reserv
+ 		= DECL_INSN_RESERV (in_insn_reserv);
+-	      bypass
+-		= find_bypass (DECL_INSN_RESERV (out_insn_reserv)->bypass_list,
+-			       DECL_BYPASS (decl)->in_insn_reserv);
+-	      if (bypass != NULL)
+-		{
+-		  if (DECL_BYPASS (decl)->latency == bypass->latency)
+-		    {
+-		      if (!w_flag)
+-			error
+-			  ("the same bypass `%s - %s' is already defined",
+-			   DECL_BYPASS (decl)->out_insn_name,
+-			   DECL_BYPASS (decl)->in_insn_name);
+-		      else
+-			warning
+-			  (0, "the same bypass `%s - %s' is already defined",
+-			   DECL_BYPASS (decl)->out_insn_name,
+-			   DECL_BYPASS (decl)->in_insn_name);
+-		    }
+-		  else
+-		    error ("bypass `%s - %s' is already defined",
+-			   DECL_BYPASS (decl)->out_insn_name,
+-			   DECL_BYPASS (decl)->in_insn_name);
+-		}
+-	      else
+-		{
+-		  DECL_BYPASS (decl)->next
+-		    = DECL_INSN_RESERV (out_insn_reserv)->bypass_list;
+-		  DECL_INSN_RESERV (out_insn_reserv)->bypass_list
+-		    = DECL_BYPASS (decl);
+-		}
++	      insert_bypass (DECL_BYPASS (decl));
+ 	    }
+ 	}
+     }
+@@ -8159,19 +8185,32 @@ output_internal_insn_latency_func (void)
+ 			    (advance_cycle_insn_decl)->insn_num));
+ 	    fprintf (output_file, "        case %d:\n",
+ 		     bypass->in_insn_reserv->insn_num);
+-	    if (bypass->bypass_guard_name == NULL)
+-	      fprintf (output_file, "          return %d;\n",
+-		       bypass->latency);
+-	    else
++	    for (;;)
+ 	      {
+-		fprintf (output_file,
+-			 "          if (%s (%s, %s))\n",
+-			 bypass->bypass_guard_name, INSN_PARAMETER_NAME,
+-			 INSN2_PARAMETER_NAME);
+-		fprintf (output_file,
+-			 "            return %d;\n          break;\n",
+-			 bypass->latency);
++		if (bypass->bypass_guard_name == NULL)
++		  {
++		    gcc_assert (bypass->next == NULL
++				|| (bypass->in_insn_reserv
++				    != bypass->next->in_insn_reserv));
++		    fprintf (output_file, "          return %d;\n",
++			     bypass->latency);
++		  }
++		else
++		  {
++		    fprintf (output_file,
++			     "          if (%s (%s, %s))\n",
++			     bypass->bypass_guard_name, INSN_PARAMETER_NAME,
++			     INSN2_PARAMETER_NAME);
++		    fprintf (output_file, "            return %d;\n",
++			     bypass->latency);
++		  }
++		if (bypass->next == NULL
++		    || bypass->in_insn_reserv != bypass->next->in_insn_reserv)
++		  break;
++		bypass = bypass->next;
+ 	      }
++	    if (bypass->bypass_guard_name != NULL)
++	      fprintf (output_file, "          break;\n");
+ 	  }
+ 	fputs ("        }\n      break;\n", output_file);
+       }
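The fprintf calls above now emit every bypass that shares an input insn
into a single switch case: guarded bypasses first, in description order,
with the unguarded bypass (if any) last.  A hand-written sketch of the
generated shape, with hypothetical guard names, insn numbers and
latencies:

    /* Sketch of what the generated code looks like; this is not actual
       genautomata output.  */
    typedef void *rtx;
    extern int guard_a (rtx, rtx);
    extern int guard_b (rtx, rtx);

    static int
    sketch_internal_insn_latency (int in_num, rtx insn, rtx insn2)
    {
      switch (in_num)
	{
	case 42:			/* hypothetical input insn number */
	  if (guard_a (insn, insn2))	/* first guard that fires wins */
	    return 0;
	  if (guard_b (insn, insn2))
	    return 1;
	  return 5;			/* unguarded bypass comes last */
	}
      return -1;			/* fall back to default latency */
    }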
+--- gcc/rtl.def	(.../trunk)	(revision 144460)
++++ gcc/rtl.def	(.../branches/ix86/atom)	(revision 144601)
+@@ -1088,7 +1088,11 @@ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_a
+    guard for the bypass.  The function will get the two insns as
+    parameters.  If the function returns zero the bypass will be
+    ignored for this case.  Additional guard is necessary to recognize
+-   complicated bypasses, e.g. when consumer is load address.  */
++   complicated bypasses, e.g. when consumer is load address.  If there
++   is more than one bypass with the same output and input insns, the
++   chosen bypass is the first bypass in the description with a guard
++   whose guard function returns nonzero.  If there is no such bypass,
++   then the bypass without a guard function is chosen.  */
+ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA)
+ 
+ /* (define_automaton string) describes names of automata generated and
+--- gcc/rtl.h	(.../trunk)	(revision 144460)
++++ gcc/rtl.h	(.../branches/ix86/atom)	(revision 144601)
+@@ -1731,6 +1731,8 @@ extern rtx get_related_value (const_rtx)
+ extern bool offset_within_block_p (const_rtx, HOST_WIDE_INT);
+ extern void split_const (rtx, rtx *, rtx *);
+ extern int reg_mentioned_p (const_rtx, const_rtx);
++extern bool reg_mentioned_by_mem_p (const_rtx, const_rtx);
++extern bool reg_dep_by_addr_p (const_rtx, const_rtx);
+ extern int count_occurrences (const_rtx, const_rtx, int);
+ extern int reg_referenced_p (const_rtx, const_rtx);
+ extern int reg_used_between_p (const_rtx, const_rtx, const_rtx);
+--- gcc/config.gcc	(.../trunk)	(revision 144460)
++++ gcc/config.gcc	(.../branches/ix86/atom)	(revision 144601)
+@@ -1087,7 +1087,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfree
+ 			tmake_file="${tmake_file} i386/t-linux64"
+ 			need_64bit_hwint=yes
+ 			case X"${with_cpu}" in
+-			Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
++			Xgeneric|Xatom|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
+ 				;;
+ 			X)
+ 				if test x$with_cpu_64 = x; then
+@@ -1096,7 +1096,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfree
+ 				;;
+ 			*)
+ 				echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
+-				echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
++				echo "generic atom core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
+ 				exit 1
+ 				;;
+ 			esac
+@@ -1201,7 +1201,7 @@ i[34567]86-*-solaris2*)
+ 		# libgcc/configure.ac instead.
+ 		need_64bit_hwint=yes
+ 		case X"${with_cpu}" in
+-		Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
++		Xgeneric|Xatom|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
+ 			;;
+ 		X)
+ 			if test x$with_cpu_64 = x; then
+@@ -1210,7 +1210,7 @@ i[34567]86-*-solaris2*)
+ 			;;
+ 		*)
+ 			echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
+-			echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
++			echo "generic atom core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
+ 			exit 1
+ 			;;
+ 		esac
+@@ -2803,7 +2803,7 @@ case "${target}" in
+ 				esac
+ 				# OK
+ 				;;
+-			"" | amdfam10 | barcelona | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | generic)
++			"" | amdfam10 | barcelona | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | atom | generic)
+ 				# OK
+ 				;;
+ 			*)
+--- gcc/config/i386/i386.h	(.../trunk)	(revision 144460)
++++ gcc/config/i386/i386.h	(.../branches/ix86/atom)	(revision 144601)
+@@ -231,6 +231,7 @@ extern const struct processor_costs ix86
+ #define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
+ #define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
+ #define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
++#define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM)
+ 
+ /* Feature tests against the various tunings.  */
+ enum ix86_tune_indices {
+@@ -295,6 +296,7 @@ enum ix86_tune_indices {
+   X86_TUNE_USE_VECTOR_FP_CONVERTS,
+   X86_TUNE_USE_VECTOR_CONVERTS,
+   X86_TUNE_FUSE_CMP_AND_BRANCH,
++  X86_TUNE_OPT_AGU,
+ 
+   X86_TUNE_LAST
+ };
+@@ -382,6 +384,7 @@ extern unsigned char ix86_tune_features[
+ 	ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
+ #define TARGET_FUSE_CMP_AND_BRANCH \
+ 	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH]
++#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
+ 
+ /* Feature tests against the various architecture variations.  */
+ enum ix86_arch_indices {
+@@ -564,6 +567,7 @@ enum target_cpu_default
+   TARGET_CPU_DEFAULT_prescott,
+   TARGET_CPU_DEFAULT_nocona,
+   TARGET_CPU_DEFAULT_core2,
++  TARGET_CPU_DEFAULT_atom,
+ 
+   TARGET_CPU_DEFAULT_geode,
+   TARGET_CPU_DEFAULT_k6,
+@@ -2256,6 +2260,7 @@ enum processor_type
+   PROCESSOR_GENERIC32,
+   PROCESSOR_GENERIC64,
+   PROCESSOR_AMDFAM10,
++  PROCESSOR_ATOM,
+   PROCESSOR_max
+ };
+ 
+--- gcc/config/i386/i386.md	(.../trunk)	(revision 144460)
++++ gcc/config/i386/i386.md	(.../branches/ix86/atom)	(revision 144601)
+@@ -298,7 +298,7 @@ (define_constants
+ 
+ 
+ ;; Processor type.
+-(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,
++(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,atom,
+ 		    generic64,amdfam10"
+   (const (symbol_ref "ix86_schedule")))
+ 
+@@ -594,6 +594,12 @@ (define_attr "fp_int_src" "false,true"
+ (define_attr "i387_cw" "trunc,floor,ceil,mask_pm,uninitialized,any"
+   (const_string "any"))
+ 
++;; Define attribute to classify add/sub insns that consumes carry flag (CF)
++(define_attr "use_carry" "0,1" (const_string "0"))
++
++;; Define attribute to indicate unaligned ssemov insns
++(define_attr "movu" "0,1" (const_string "0"))
++
+ ;; Describe a user's asm statement.
+ (define_asm_attributes
+   [(set_attr "length" "128")
+@@ -709,6 +715,7 @@ (define_mode_iterator P [(SI "Pmode == S
+ (include "k6.md")
+ (include "athlon.md")
+ (include "geode.md")
++(include "atom.md")
+ 
+ 
+ ;; Operand and operator predicates and constraints
+@@ -5776,6 +5783,7 @@ (define_insn "adddi3_carry_rex64"
+   "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+   "adc{q}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "DI")])
+ 
+@@ -5850,6 +5858,7 @@ (define_insn "addqi3_carry"
+   "ix86_binary_operator_ok (PLUS, QImode, operands)"
+   "adc{b}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "QI")])
+ 
+@@ -5862,6 +5871,7 @@ (define_insn "addhi3_carry"
+   "ix86_binary_operator_ok (PLUS, HImode, operands)"
+   "adc{w}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "HI")])
+ 
+@@ -5874,6 +5884,7 @@ (define_insn "addsi3_carry"
+   "ix86_binary_operator_ok (PLUS, SImode, operands)"
+   "adc{l}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "SI")])
+ 
+@@ -5887,6 +5898,7 @@ (define_insn "*addsi3_carry_zext"
+   "TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)"
+   "adc{l}\t{%2, %k0|%k0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "SI")])
+ 
+@@ -6116,9 +6128,9 @@ (define_insn_and_split "*lea_general_3_z
+    (set_attr "mode" "SI")])
+ 
+ (define_insn "*adddi_1_rex64"
+-  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r")
+-	(plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,r")
+-		 (match_operand:DI 2 "x86_64_general_operand" "rme,re,le")))
++  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r,r")
++	(plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,r,r")
++		 (match_operand:DI 2 "x86_64_general_operand" "rme,re,0,le")))
+    (clobber (reg:CC FLAGS_REG))]
+   "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+ {
+@@ -6139,6 +6151,10 @@ (define_insn "*adddi_1_rex64"
+ 	}
+ 
+     default:
++      /* Use add as much as possible to replace lea for AGU optimization. */
++      if (which_alternative == 2 && TARGET_OPT_AGU)
++        return "add{q}\t{%1, %0|%0, %1}";
++        
+       gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ 
+       /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+@@ -6157,8 +6173,11 @@ (define_insn "*adddi_1_rex64"
+     }
+ }
+   [(set (attr "type")
+-     (cond [(eq_attr "alternative" "2")
++     (cond [(and (eq_attr "alternative" "2") 
++                 (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
+ 	      (const_string "lea")
++            (eq_attr "alternative" "3")
++              (const_string "lea")
+ 	    ; Current assemblers are broken and do not allow @GOTOFF in
+ 	    ; ought but a memory context.
+ 	    (match_operand:DI 2 "pic_symbolic_operand" "")
+@@ -6175,8 +6194,8 @@ (define_split
+ 	(plus:DI (match_operand:DI 1 "register_operand" "")
+ 		 (match_operand:DI 2 "x86_64_nonmemory_operand" "")))
+    (clobber (reg:CC FLAGS_REG))]
+-  "TARGET_64BIT && reload_completed
+-   && true_regnum (operands[0]) != true_regnum (operands[1])"
++  "TARGET_64BIT && reload_completed 
++   && ix86_lea_for_add_ok (PLUS, insn, operands)"
+   [(set (match_dup 0)
+ 	(plus:DI (match_dup 1)
+ 		 (match_dup 2)))]
+@@ -6380,9 +6399,9 @@ (define_insn "*adddi_5_rex64"
+ 
+ 
+ (define_insn "*addsi_1"
+-  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r")
+-	(plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r")
+-		 (match_operand:SI 2 "general_operand" "g,ri,li")))
++  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r,r")
++	(plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r,r")
++		 (match_operand:SI 2 "general_operand" "g,ri,0,li")))
+    (clobber (reg:CC FLAGS_REG))]
+   "ix86_binary_operator_ok (PLUS, SImode, operands)"
+ {
+@@ -6403,6 +6422,10 @@ (define_insn "*addsi_1"
+ 	}
+ 
+     default:
++      /* Use add as much as possible to replace lea for AGU optimization. */
++      if (which_alternative == 2 && TARGET_OPT_AGU)
++        return "add{l}\t{%1, %0|%0, %1}";
++
+       gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ 
+       /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+@@ -6419,7 +6442,10 @@ (define_insn "*addsi_1"
+     }
+ }
+   [(set (attr "type")
+-     (cond [(eq_attr "alternative" "2")
++     (cond [(and (eq_attr "alternative" "2") 
++                 (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
++               (const_string "lea")
++            (eq_attr "alternative" "3")
+ 	      (const_string "lea")
+ 	    ; Current assemblers are broken and do not allow @GOTOFF in
+ 	    ; ought but a memory context.
+@@ -6437,8 +6463,7 @@ (define_split
+ 	(plus (match_operand 1 "register_operand" "")
+               (match_operand 2 "nonmemory_operand" "")))
+    (clobber (reg:CC FLAGS_REG))]
+-  "reload_completed
+-   && true_regnum (operands[0]) != true_regnum (operands[1])"
++  "reload_completed && ix86_lea_for_add_ok (PLUS, insn, operands)" 
+   [(const_int 0)]
+ {
+   rtx pat;
+@@ -7539,6 +7564,7 @@ (define_insn "subdi3_carry_rex64"
+   "TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)"
+   "sbb{q}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "DI")])
+ 
+@@ -7587,6 +7613,7 @@ (define_insn "subqi3_carry"
+   "ix86_binary_operator_ok (MINUS, QImode, operands)"
+   "sbb{b}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "QI")])
+ 
+@@ -7599,6 +7626,7 @@ (define_insn "subhi3_carry"
+   "ix86_binary_operator_ok (MINUS, HImode, operands)"
+   "sbb{w}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "HI")])
+ 
+@@ -7611,6 +7639,7 @@ (define_insn "subsi3_carry"
+   "ix86_binary_operator_ok (MINUS, SImode, operands)"
+   "sbb{l}\t{%2, %0|%0, %2}"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "mode" "SI")])
+ 
+@@ -15224,6 +15253,7 @@ (define_insn "return_internal"
+   "reload_completed"
+   "ret"
+   [(set_attr "length" "1")
++   (set_attr "atom_unit" "jeu")
+    (set_attr "length_immediate" "0")
+    (set_attr "modrm" "0")])
+ 
+@@ -15236,6 +15266,7 @@ (define_insn "return_internal_long"
+   "reload_completed"
+   "rep\;ret"
+   [(set_attr "length" "1")
++   (set_attr "atom_unit" "jeu")
+    (set_attr "length_immediate" "0")
+    (set_attr "prefix_rep" "1")
+    (set_attr "modrm" "0")])
+@@ -15246,6 +15277,7 @@ (define_insn "return_pop_internal"
+   "reload_completed"
+   "ret\t%0"
+   [(set_attr "length" "3")
++   (set_attr "atom_unit" "jeu")
+    (set_attr "length_immediate" "2")
+    (set_attr "modrm" "0")])
+ 
+@@ -16367,6 +16399,7 @@ (define_insn "*rcpsf2_sse"
+   "TARGET_SSE_MATH"
+   "%vrcpss\t{%1, %d0|%d0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "rcp")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "SF")])
+ 
+@@ -16718,6 +16751,7 @@ (define_insn "*rsqrtsf2_sse"
+   "TARGET_SSE_MATH"
+   "%vrsqrtss\t{%1, %d0|%d0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "rcp")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "SF")])
+ 
+@@ -16738,6 +16772,7 @@ (define_insn "*sqrt<mode>2_sse"
+   "SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH"
+   "%vsqrts<ssemodefsuffix>\t{%1, %d0|%d0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "sqrt")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "<MODE>")
+    (set_attr "athlon_decode" "*")
+@@ -19791,6 +19826,7 @@ (define_insn "x86_movdicc_0_m1_rex64"
+   ; Since we don't have the proper number of operands for an alu insn,
+   ; fill in all the blanks.
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "memory" "none")
+    (set_attr "imm_disp" "false")
+@@ -19806,6 +19842,7 @@ (define_insn "*x86_movdicc_0_m1_se"
+   ""
+   "sbb{q}\t%0, %0"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "memory" "none")
+    (set_attr "imm_disp" "false")
+@@ -19849,6 +19886,7 @@ (define_insn "x86_movsicc_0_m1"
+   ; Since we don't have the proper number of operands for an alu insn,
+   ; fill in all the blanks.
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "memory" "none")
+    (set_attr "imm_disp" "false")
+@@ -19864,6 +19902,7 @@ (define_insn "*x86_movsicc_0_m1_se"
+   ""
+   "sbb{l}\t%0, %0"
+   [(set_attr "type" "alu")
++   (set_attr "use_carry" "1")
+    (set_attr "pent_pair" "pu")
+    (set_attr "memory" "none")
+    (set_attr "imm_disp" "false")
+@@ -20196,7 +20235,8 @@ (define_insn "pro_epilogue_adjust_stack_
+     }
+ }
+   [(set (attr "type")
+-	(cond [(eq_attr "alternative" "0")
++	(cond [(and (eq_attr "alternative" "0") 
++	            (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
+ 		 (const_string "alu")
+ 	       (match_operand:SI 2 "const0_operand" "")
+ 		 (const_string "imov")
+@@ -20239,7 +20279,8 @@ (define_insn "pro_epilogue_adjust_stack_
+     }
+ }
+   [(set (attr "type")
+-	(cond [(eq_attr "alternative" "0")
++	(cond [(and (eq_attr "alternative" "0")
++	            (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
+ 		 (const_string "alu")
+ 	       (match_operand:DI 2 "const0_operand" "")
+ 		 (const_string "imov")
+@@ -21731,6 +21772,7 @@ (define_insn "*prefetch_sse"
+   return patterns[locality];
+ }
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "prefetch")
+    (set_attr "memory" "none")])
+ 
+ (define_insn "*prefetch_sse_rex"
+@@ -21749,6 +21791,7 @@ (define_insn "*prefetch_sse_rex"
+   return patterns[locality];
+ }
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "prefetch")
+    (set_attr "memory" "none")])
+ 
+ (define_insn "*prefetch_3dnow"
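The *adddi_1_rex64 and *addsi_1 changes above add an alternative whose
second source is tied to the destination, so that when tuning for Atom
(TARGET_OPT_AGU) the compiler can emit a plain add instead of lea and
keep the AGU free for real address generation.  A trivial C function
that can exercise it; the assembly in the comment is only indicative,
since the actual choice depends on register allocation:

    /* With -O2 -mtune=atom the register-register add below may now be
       emitted as
	   addq  %rsi, %rax
       rather than the lea form
	   leaq  (%rdi,%rsi), %rax
       (indicative only).  */
    long
    sum (long a, long b)
    {
      return a + b;
    }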
+--- gcc/config/i386/atom.md	(.../trunk)	(revision 0)
++++ gcc/config/i386/atom.md	(.../branches/ix86/atom)	(revision 144601)
+@@ -0,0 +1,796 @@
++;; Atom Scheduling
++;; Copyright (C) 2009 Free Software Foundation, Inc.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING.  If not, write to
++;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
++;; Boston, MA 02110-1301, USA.  */
++;;
++;; Atom is an in-order core with two integer pipelines.
++
++
++(define_attr "atom_unit" "sishuf,simul,jeu,complex,other" 
++  (const_string "other"))
++
++(define_attr "atom_sse_attr" "rcp,movdup,lfence,fence,prefetch,sqrt,mxcsr,other"
++  (const_string "other"))
++
++(define_automaton "atom")
++
++;;  Atom has two ports: port 0 and port 1 connecting to all execution units
++(define_cpu_unit "atom-port-0,atom-port-1" "atom")
++
++;;  EU: Execution Unit
++;;  Atom EUs are connected by port 0 or port 1. 
++
++(define_cpu_unit "atom-eu-0, atom-eu-1,
++                  atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4"
++                  "atom")
++
++;; Some EUs have duplicated copies and can be accessed via either
++;; port 0 or port 1
++;; (define_reservation "atom-port-either" "(atom-port-0 | atom-port-1)")
++
++;;; Some instructions are dual-pipe execution and need both ports
++;;; Complex multi-op macro-instructions need both ports and all EUs
++(define_reservation "atom-port-dual" "(atom-port-0 + atom-port-1)")
++(define_reservation "atom-all-eu" "(atom-eu-0 + atom-eu-1 + 
++                                    atom-imul-1 + atom-imul-2 + atom-imul-3 +
++                                    atom-imul-4)")
++
++;;; Most simple instructions have 1 cycle latency.  Some of them
++;;; issue in port 0, some in port 1 and some in either port.
++(define_reservation "atom-simple-0" "(atom-port-0 + atom-eu-0)")
++(define_reservation "atom-simple-1" "(atom-port-1 + atom-eu-1)")
++(define_reservation "atom-simple-either" "(atom-simple-0 | atom-simple-1)")
++
++;;; Some insns issue in port 0 with 3 cycle latency and 1 cycle tput
++(define_reservation "atom-eu-0-3-1" "(atom-port-0 + atom-eu-0, nothing*2)")
++
++;;; fmul insn can have 4 or 5 cycles latency
++(define_reservation "atom-fmul-5c" "(atom-port-0 + atom-eu-0), nothing*4")
++(define_reservation "atom-fmul-4c" "(atom-port-0 + atom-eu-0), nothing*3")
++
++;;; fadd can have 5 cycles latency depending on instruction form
++(define_reservation "atom-fadd-5c" "(atom-port-1 + atom-eu-1), nothing*5")
++
++;;; imul insn has 5 cycles latency
++(define_reservation "atom-imul-32" 
++                    "atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4, 
++                     atom-port-0")
++;;; imul instruction excludes other non-FP instructions.
++(exclusion_set "atom-eu-0, atom-eu-1" 
++               "atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4")
++
++;;; dual-execution instructions can have 1, 2, 4 or 5 cycles latency
++;;; depending on instruction form
++(define_reservation "atom-dual-1c" "(atom-port-dual + atom-eu-0 + atom-eu-1)")
++(define_reservation "atom-dual-2c"
++                    "(atom-port-dual + atom-eu-0 + atom-eu-1, nothing)")
++(define_reservation "atom-dual-5c"
++                    "(atom-port-dual + atom-eu-0 + atom-eu-1, nothing*4)")
++
++;;; Complex macro-instruction has variants of latency, and uses both ports.
++(define_reservation "atom-complex" "(atom-port-dual + atom-all-eu)")
++
++(define_insn_reservation  "atom_other" 9
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "other")
++            (eq_attr "atom_unit" "!jeu")))
++  "atom-complex, atom-all-eu*8")
++
++;; return has type "other" with atom_unit "jeu"
++(define_insn_reservation  "atom_other_2" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "other")
++            (eq_attr "atom_unit" "jeu")))
++  "atom-dual-1c")
++
++(define_insn_reservation  "atom_multi" 9
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "multi"))
++  "atom-complex, atom-all-eu*8")
++
++;; Normal alu insns without carry
++(define_insn_reservation  "atom_alu" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu")
++            (and (eq_attr "memory" "none")
++                 (eq_attr "use_carry" "0"))))
++  "atom-simple-either")
++
++;; Normal alu insns without carry, with a memory operand
++(define_insn_reservation  "atom_alu_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu")
++            (and (eq_attr "memory" "!none")
++                 (eq_attr "use_carry" "0"))))
++  "atom-simple-either")
++
++;; Alu insns consuming CF, such as adc/sbb
++(define_insn_reservation  "atom_alu_carry" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu")
++            (and (eq_attr "memory" "none")
++                 (eq_attr "use_carry" "1"))))
++  "atom-simple-either")
++
++;; Alu insns consuming CF, such as adc/sbb, with a memory operand
++(define_insn_reservation  "atom_alu_carry_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu")
++            (and (eq_attr "memory" "!none")
++                (eq_attr "use_carry" "1"))))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_alu1" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu1")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_alu1_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "alu1")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_negnot" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "negnot")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_negnot_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "negnot")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_imov" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imov")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_imov_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imov")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++;; 16<-16, 32<-32
++(define_insn_reservation  "atom_imovx" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imovx")
++            (and (eq_attr "memory" "none")
++                 (ior (and (match_operand:HI 0 "register_operand")
++                           (match_operand:HI 1 "general_operand"))
++                      (and (match_operand:SI 0 "register_operand")
++                           (match_operand:SI 1 "general_operand"))))))
++  "atom-simple-either")
++
++;; 16<-16, 32<-32, mem
++(define_insn_reservation  "atom_imovx_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imovx")
++            (and (eq_attr "memory" "!none")
++                 (ior (and (match_operand:HI 0 "register_operand")
++                           (match_operand:HI 1 "general_operand"))
++                      (and (match_operand:SI 0 "register_operand")
++                           (match_operand:SI 1 "general_operand"))))))
++  "atom-simple-either")
++
++;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8
++(define_insn_reservation  "atom_imovx_2" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imovx")
++            (and (eq_attr "memory" "none")
++                 (ior (match_operand:QI 0 "register_operand")
++                      (ior (and (match_operand:SI 0 "register_operand")
++                                (not (match_operand:SI 1 "general_operand")))
++                           (match_operand:DI 0 "register_operand"))))))
++  "atom-simple-0")
++
++;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8, mem
++(define_insn_reservation  "atom_imovx_2_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imovx")
++            (and (eq_attr "memory" "!none")
++                 (ior (match_operand:QI 0 "register_operand")
++                      (ior (and (match_operand:SI 0 "register_operand")
++                                (not (match_operand:SI 1 "general_operand")))
++                           (match_operand:DI 0 "register_operand"))))))
++  "atom-simple-0")
++
++;; 16<-8
++(define_insn_reservation  "atom_imovx_3" 3
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imovx")
++            (and (match_operand:HI 0 "register_operand")
++                 (match_operand:QI 1 "general_operand"))))
++  "atom-complex, atom-all-eu*2")
++
++(define_insn_reservation  "atom_lea" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "lea")
++            (eq_attr "mode" "!HI")))
++  "atom-simple-either")
++
++;; lea with a 16bit address is a complex insn
++(define_insn_reservation  "atom_lea_2" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "lea")
++            (eq_attr "mode" "HI")))
++  "atom-complex, atom-all-eu")
++
++(define_insn_reservation  "atom_incdec" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "incdec")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_incdec_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "incdec")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++;; simple shift instructions use the SHIFT eu, no memory
++(define_insn_reservation  "atom_ishift" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ishift")
++            (and (eq_attr "memory" "none") (eq_attr "prefix_0f" "0"))))
++  "atom-simple-0")
++
++;; simple shift instructions use the SHIFT eu, with memory
++(define_insn_reservation  "atom_ishift_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ishift")
++            (and (eq_attr "memory" "!none") (eq_attr "prefix_0f" "0"))))
++  "atom-simple-0")
++
++;; DF shift (prefixed with 0f) is a complex insn with latency of 7 cycles
++(define_insn_reservation  "atom_ishift_3" 7
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ishift")
++            (eq_attr "prefix_0f" "1")))
++  "atom-complex, atom-all-eu*6")
++
++(define_insn_reservation  "atom_ishift1" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ishift1")
++            (eq_attr "memory" "none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_ishift1_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ishift1")
++            (eq_attr "memory" "!none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_rotate" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "rotate")
++            (eq_attr "memory" "none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_rotate_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "rotate")
++            (eq_attr "memory" "!none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_rotate1" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "rotate1")
++            (eq_attr "memory" "none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_rotate1_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "rotate1")
++            (eq_attr "memory" "!none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_imul" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imul")
++            (and (eq_attr "memory" "none") (eq_attr "mode" "SI"))))
++  "atom-imul-32")
++
++(define_insn_reservation  "atom_imul_mem" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imul")
++            (and (eq_attr "memory" "!none") (eq_attr "mode" "SI"))))
++  "atom-imul-32")
++
++;; latency set to 10 as for a common 64x64 imul
++(define_insn_reservation  "atom_imul_3" 10
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "imul")
++            (eq_attr "mode" "!SI")))
++  "atom-complex, atom-all-eu*9")
++
++(define_insn_reservation  "atom_idiv" 65
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "idiv"))
++  "atom-complex, atom-all-eu*32, nothing*32")
++
++(define_insn_reservation  "atom_icmp" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "icmp")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_icmp_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "icmp")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_test" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "test")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_test_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "test")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_ibr" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ibr")
++            (eq_attr "memory" "!load")))
++  "atom-simple-1")
++
++;; complex if jump target is from address
++(define_insn_reservation  "atom_ibr_2" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ibr")
++            (eq_attr "memory" "load")))
++  "atom-complex, atom-all-eu")
++
++(define_insn_reservation  "atom_setcc" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "setcc")
++            (eq_attr "memory" "!store")))
++  "atom-simple-either")
++
++;; 2 cycles complex if target is in memory
++(define_insn_reservation  "atom_setcc_2" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "setcc")
++            (eq_attr "memory" "store")))
++  "atom-complex, atom-all-eu")
++
++(define_insn_reservation  "atom_icmov" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "icmov")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_icmov_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "icmov")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++;; UCODE if segreg, ignored
++(define_insn_reservation  "atom_push" 2
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "push"))
++  "atom-dual-2c")
++
++;; pop r64 is 1 cycle. UCODE if segreg, ignored
++(define_insn_reservation  "atom_pop" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "pop")
++            (eq_attr "mode" "DI")))
++  "atom-dual-1c")
++
++;; pop non-r64 is 2 cycles. UCODE if segreg, ignored
++(define_insn_reservation  "atom_pop_2" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "pop")
++            (eq_attr "mode" "!DI")))
++  "atom-dual-2c")
++
++;; UCODE if segreg, ignored
++(define_insn_reservation  "atom_call" 1
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "call"))
++  "atom-dual-1c")
++
++(define_insn_reservation  "atom_callv" 1
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "callv"))
++  "atom-dual-1c")
++
++(define_insn_reservation  "atom_leave" 3
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "leave"))
++  "atom-complex, atom-all-eu*2")
++
++(define_insn_reservation  "atom_str" 3
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "str"))
++  "atom-complex, atom-all-eu*2")
++
++(define_insn_reservation  "atom_sselog" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sselog")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_sselog_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sselog")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_sselog1" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sselog1")
++            (eq_attr "memory" "none")))
++  "atom-simple-0")
++
++(define_insn_reservation  "atom_sselog1_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sselog1")
++            (eq_attr "memory" "!none")))
++  "atom-simple-0")
++
++;; not pmadd, not psad
++(define_insn_reservation  "atom_sseiadd" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseiadd")
++            (and (not (match_operand:V2DI 0 "register_operand"))
++                 (and (eq_attr "atom_unit" "!simul")
++                      (eq_attr "atom_unit" "!complex")))))
++  "atom-simple-either")
++
++;; pmadd, psad and 64
++(define_insn_reservation  "atom_sseiadd_2" 4
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseiadd")
++            (and (not (match_operand:V2DI 0 "register_operand"))
++                 (and (eq_attr "atom_unit" "simul" )
++                      (eq_attr "mode" "DI")))))
++  "atom-fmul-4c")
++
++;; pmadd, psad and 128
++(define_insn_reservation  "atom_sseiadd_3" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseiadd")
++            (and (not (match_operand:V2DI 0 "register_operand"))
++                 (and (eq_attr "atom_unit" "simul" )
++                      (eq_attr "mode" "TI")))))
++  "atom-fmul-5c")
++
++;; if paddq(64 bit op), phadd/phsub
++(define_insn_reservation  "atom_sseiadd_4" 6
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseiadd")
++            (ior (match_operand:V2DI 0 "register_operand")
++                 (eq_attr "atom_unit" "complex"))))
++  "atom-complex, atom-all-eu*5")
++
++;; if immediate op. 
++(define_insn_reservation  "atom_sseishft" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseishft")
++            (and (eq_attr "atom_unit" "!sishuf")
++                 (match_operand 2 "immediate_operand"))))
++  "atom-simple-either")
++
++;; if palignr or psrldq
++(define_insn_reservation  "atom_sseishft_2" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseishft")
++            (and (eq_attr "atom_unit" "sishuf")
++                 (match_operand 2 "immediate_operand"))))
++  "atom-simple-0")
++
++;; if reg/mem op
++(define_insn_reservation  "atom_sseishft_3" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseishft")
++            (not (match_operand 2 "immediate_operand"))))
++  "atom-complex, atom-all-eu")
++
++(define_insn_reservation  "atom_sseimul" 1
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "sseimul"))
++  "atom-simple-0")
++
++;; rcpss or rsqrtss
++(define_insn_reservation  "atom_sse" 4
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sse")
++            (and (eq_attr "atom_sse_attr" "rcp") (eq_attr "mode" "SF"))))
++  "atom-fmul-4c")
++
++;; movshdup, movsldup.  Suggested to be typed as sseishft
++(define_insn_reservation  "atom_sse_2" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sse")
++            (eq_attr "atom_sse_attr" "movdup")))
++  "atom-simple-0")
++
++;; lfence
++(define_insn_reservation  "atom_sse_3" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sse")
++            (eq_attr "atom_sse_attr" "lfence")))
++  "atom-simple-either")
++
++;; sfence,clflush,mfence, prefetch
++(define_insn_reservation  "atom_sse_4" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sse")
++            (ior (eq_attr "atom_sse_attr" "fence")
++                 (eq_attr "atom_sse_attr" "prefetch"))))
++  "atom-simple-0")
++
++;; rcpps, rsqrtss, sqrt, ldmxcsr
++(define_insn_reservation  "atom_sse_5" 7
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sse")
++            (ior (ior (eq_attr "atom_sse_attr" "sqrt")
++                      (eq_attr "atom_sse_attr" "mxcsr"))
++                 (and (eq_attr "atom_sse_attr" "rcp")
++                      (eq_attr "mode" "V4SF")))))
++  "atom-complex, atom-all-eu*6")
++
++;; xmm->xmm
++(define_insn_reservation  "atom_ssemov" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemov")
++            (and (match_operand 0 "register_operand" "xy") (match_operand 1 "register_operand" "xy"))))
++  "atom-simple-either")
++
++;; reg->xmm
++(define_insn_reservation  "atom_ssemov_2" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemov")
++            (and (match_operand 0 "register_operand" "xy") (match_operand 1 "register_operand" "r"))))
++  "atom-simple-0")
++
++;; xmm->reg
++(define_insn_reservation  "atom_ssemov_3" 3
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemov")
++            (and (match_operand 0 "register_operand" "r") (match_operand 1 "register_operand" "xy"))))
++  "atom-eu-0-3-1")
++
++;; mov mem
++(define_insn_reservation  "atom_ssemov_4" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemov")
++            (and (eq_attr "movu" "0") (eq_attr "memory" "!none"))))
++  "atom-simple-0")
++
++;; movu mem
++(define_insn_reservation  "atom_ssemov_5" 2
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemov")
++            (ior (eq_attr "movu" "1") (eq_attr "memory" "!none"))))
++  "atom-complex, atom-all-eu")
++
++;; no memory simple
++(define_insn_reservation  "atom_sseadd" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseadd")
++            (and (eq_attr "memory" "none")
++                 (and (eq_attr "mode" "!V2DF")
++                      (eq_attr "atom_unit" "!complex")))))
++  "atom-fadd-5c")
++
++;; memory simple
++(define_insn_reservation  "atom_sseadd_mem" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseadd")
++            (and (eq_attr "memory" "!none")
++                 (and (eq_attr "mode" "!V2DF")
++                      (eq_attr "atom_unit" "!complex")))))
++  "atom-dual-5c")
++
++;; maxps, minps, *pd, hadd, hsub
++(define_insn_reservation  "atom_sseadd_3" 8
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseadd")
++            (ior (eq_attr "mode" "V2DF") (eq_attr "atom_unit" "complex"))))
++  "atom-complex, atom-all-eu*7")
++
++;; Except dppd/dpps
++(define_insn_reservation  "atom_ssemul" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemul")
++            (eq_attr "mode" "!SF")))
++  "atom-fmul-5c")
++
++;; Except dppd/dpps, 4 cycle if mulss
++(define_insn_reservation  "atom_ssemul_2" 4
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssemul")
++            (eq_attr "mode" "SF")))
++  "atom-fmul-4c")
++
++(define_insn_reservation  "atom_ssecmp" 1
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "ssecmp"))
++  "atom-simple-either")
++
++(define_insn_reservation  "atom_ssecomi" 10
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "ssecomi"))
++  "atom-complex, atom-all-eu*9")
++
++;; no memory and cvtpi2ps, cvtps2pi, cvttps2pi
++(define_insn_reservation  "atom_ssecvt" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssecvt")
++            (ior (and (match_operand:V2SI 0 "register_operand")
++                      (match_operand:V4SF 1 "register_operand"))
++                 (and (match_operand:V4SF 0 "register_operand")
++                      (match_operand:V2SI 1 "register_operand")))))
++  "atom-fadd-5c")
++
++;; memory and cvtpi2ps, cvtps2pi, cvttps2pi
++(define_insn_reservation  "atom_ssecvt_2" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssecvt")
++            (ior (and (match_operand:V2SI 0 "register_operand")
++                      (match_operand:V4SF 1 "memory_operand"))
++                 (and (match_operand:V4SF 0 "register_operand")
++                      (match_operand:V2SI 1 "memory_operand")))))
++  "atom-dual-5c")
++
++;; Otherwise: 7 cycles on average for cvtss2sd
++(define_insn_reservation  "atom_ssecvt_3" 7
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "ssecvt")
++            (not (ior (and (match_operand:V2SI 0 "register_operand")
++                           (match_operand:V4SF 1 "nonimmediate_operand"))
++                      (and (match_operand:V4SF 0 "register_operand")
++                           (match_operand:V2SI 1 "nonimmediate_operand"))))))
++  "atom-complex, atom-all-eu*6")
++
++;; memory and cvtsi2sd
++(define_insn_reservation  "atom_sseicvt" 5
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseicvt")
++            (and (match_operand:V2DF 0 "register_operand")
++                 (match_operand:SI 1 "memory_operand"))))
++  "atom-dual-5c")
++
++;; Otherwise: 8 cycles on average for cvtsd2si
++(define_insn_reservation  "atom_sseicvt_2" 8
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "sseicvt")
++            (not (and (match_operand:V2DF 0 "register_operand")
++                      (match_operand:SI 1 "memory_operand")))))
++  "atom-complex, atom-all-eu*7")
++
++(define_insn_reservation  "atom_ssediv" 62
++  (and (eq_attr "cpu" "atom")
++       (eq_attr "type" "ssediv"))
++  "atom-complex, atom-all-eu*12, nothing*49")
++
++;; simple for fmov
++(define_insn_reservation  "atom_fmov" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "fmov")
++            (eq_attr "memory" "none")))
++  "atom-simple-either")
++
++;; simple for fmov
++(define_insn_reservation  "atom_fmov_mem" 1
++  (and (eq_attr "cpu" "atom")
++       (and (eq_attr "type" "fmov")
++            (eq_attr "memory" "!none")))
++  "atom-simple-either")
++
++;; Define bypass here
++
++;; There will be no stall from lea to non-mem EX insns
++(define_bypass 0 "atom_lea"
++                 "atom_alu_carry,
++                  atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
++                  atom_incdec, atom_setcc, atom_icmov, atom_pop")
++
++(define_bypass 0 "atom_lea"
++                 "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_imovx_mem, atom_imovx_2_mem,
++                  atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
++                 "!ix86_agi_dependent")
++
++;; There will be a 3 cycle stall from EX insns to AGEN insns (LEA)
++(define_bypass 4 "atom_alu_carry,
++                  atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
++                  atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
++                  atom_rotate1, atom_setcc, atom_icmov, atom_pop,
++                  atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_imovx_mem, atom_imovx_2_mem,
++                  atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
++                 "atom_lea")
++
++;; There will be a 3 cycle stall from EX insns to insns that need address calculation
++(define_bypass 4 "atom_alu_carry,
++                  atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
++                  atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
++                  atom_rotate1, atom_setcc, atom_icmov, atom_pop,
++                  atom_imovx_mem, atom_imovx_2_mem,
++                  atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
++                 "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_negnot_mem, atom_imov_mem, atom_incdec_mem,
++                  atom_imovx_mem, atom_imovx_2_mem,
++                  atom_imul_mem, atom_icmp_mem,
++                  atom_test_mem, atom_icmov_mem, atom_sselog_mem,
++                  atom_sselog1_mem, atom_fmov_mem, atom_sseadd_mem,
++                  atom_ishift_mem, atom_ishift1_mem, 
++                  atom_rotate_mem, atom_rotate1_mem"
++                  "ix86_agi_dependent")
++
++;; Stall from imul to lea is 8 cycles.
++(define_bypass 9 "atom_imul, atom_imul_mem" "atom_lea")
++
++;; Stall from imul to memory address is 8 cycles.
++(define_bypass 9 "atom_imul, atom_imul_mem" 
++                 "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_negnot_mem, atom_imov_mem, atom_incdec_mem,
++                  atom_ishift_mem, atom_ishift1_mem, atom_rotate_mem,
++                  atom_rotate1_mem, atom_imul_mem, atom_icmp_mem,
++                  atom_test_mem, atom_icmov_mem, atom_sselog_mem,
++                  atom_sselog1_mem, atom_fmov_mem, atom_sseadd_mem"
++                  "ix86_agi_dependent")
++
++;; There will be no stall from cmp/test to jcc
++
++;; There will be a 1 cycle stall from a flag producer to cmov and adc/sbb
++(define_bypass 2 "atom_icmp, atom_test, atom_alu, atom_alu_carry,
++                  atom_alu1, atom_negnot, atom_incdec, atom_ishift,
++                  atom_ishift1, atom_rotate, atom_rotate1"
++                 "atom_icmov, atom_alu_carry")
++
++;; lea to shift count stall is 2 cycles
++(define_bypass 3 "atom_lea"
++                 "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1,
++                  atom_ishift_mem, atom_ishift1_mem, 
++                  atom_rotate_mem, atom_rotate1_mem"
++                 "ix86_dep_by_shift_count")
++
++;; lea to shift source stall is 1 cycle
++(define_bypass 2 "atom_lea"
++                 "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1"
++                 "!ix86_dep_by_shift_count")
++
++;; non-lea to shift count stall is 1 cycle
++(define_bypass 2 "atom_alu_carry,
++                  atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
++                  atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
++                  atom_rotate1, atom_setcc, atom_icmov, atom_pop,
++                  atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
++                  atom_imovx_mem, atom_imovx_2_mem,
++                  atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
++                 "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1,
++                  atom_ishift_mem, atom_ishift1_mem, 
++                  atom_rotate_mem, atom_rotate1_mem"
++                 "ix86_dep_by_shift_count")
+--- gcc/config/i386/sse.md	(.../trunk)	(revision 144460)
++++ gcc/config/i386/sse.md	(.../branches/ix86/atom)	(revision 144601)
+@@ -338,6 +338,7 @@ (define_insn "avx_movup<avxmodesuffixf2c
+    && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+   "vmovup<avxmodesuffixf2c>\t{%1, %0|%0, %1}"
+   [(set_attr "type" "ssemov")
++   (set_attr "movu" "1")
+    (set_attr "prefix" "vex")
+    (set_attr "mode" "<MODE>")])
+ 
+@@ -363,6 +364,7 @@ (define_insn "<sse>_movup<ssemodesuffixf
+    && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+   "movup<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
+   [(set_attr "type" "ssemov")
++   (set_attr "movu" "1")
+    (set_attr "mode" "<MODE>")])
+ 
+ (define_insn "avx_movdqu<avxmodesuffix>"
+@@ -373,6 +375,7 @@ (define_insn "avx_movdqu<avxmodesuffix>"
+   "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+   "vmovdqu\t{%1, %0|%0, %1}"
+   [(set_attr "type" "ssemov")
++   (set_attr "movu" "1")
+    (set_attr "prefix" "vex")
+    (set_attr "mode" "<avxvecmode>")])
+ 
+@@ -383,6 +386,7 @@ (define_insn "sse2_movdqu"
+   "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+   "movdqu\t{%1, %0|%0, %1}"
+   [(set_attr "type" "ssemov")
++   (set_attr "movu" "1")
+    (set_attr "prefix_data16" "1")
+    (set_attr "mode" "TI")])
+ 
+@@ -424,7 +428,7 @@ (define_insn "sse2_movntv2di"
+ 		     UNSPEC_MOVNT))]
+   "TARGET_SSE2"
+   "movntdq\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix_data16" "1")
+    (set_attr "mode" "TI")])
+ 
+@@ -434,7 +438,7 @@ (define_insn "sse2_movntsi"
+ 		   UNSPEC_MOVNT))]
+   "TARGET_SSE2"
+   "movnti\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "mode" "V2DF")])
+ 
+ (define_insn "avx_lddqu<avxmodesuffix>"
+@@ -445,6 +449,7 @@ (define_insn "avx_lddqu<avxmodesuffix>"
+   "TARGET_AVX"
+   "vlddqu\t{%1, %0|%0, %1}"
+   [(set_attr "type" "ssecvt")
++   (set_attr "movu" "1")
+    (set_attr "prefix" "vex")
+    (set_attr "mode" "<avxvecmode>")])
+ 
+@@ -454,7 +459,8 @@ (define_insn "sse3_lddqu"
+ 		      UNSPEC_LDDQU))]
+   "TARGET_SSE3"
+   "lddqu\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
++   (set_attr "movu" "1")
+    (set_attr "prefix_rep" "1")
+    (set_attr "mode" "TI")])
+ 
+@@ -761,6 +767,7 @@ (define_insn "sse_rcpv4sf2"
+   "TARGET_SSE"
+   "%vrcpps\t{%1, %0|%0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "rcp")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "V4SF")])
+ 
+@@ -787,6 +794,7 @@ (define_insn "sse_vmrcpv4sf2"
+   "TARGET_SSE"
+   "rcpss\t{%1, %0|%0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "rcp")
+    (set_attr "mode" "SF")])
+ 
+ (define_expand "sqrtv8sf2"
+@@ -832,6 +840,7 @@ (define_insn "sse_sqrtv4sf2"
+   "TARGET_SSE"
+   "%vsqrtps\t{%1, %0|%0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "sqrt")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "V4SF")])
+ 
+@@ -876,6 +885,7 @@ (define_insn "<sse>_vmsqrt<mode>2"
+   "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
+   "sqrts<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "sqrt")
+    (set_attr "mode" "<ssescalarmode>")])
+ 
+ (define_expand "rsqrtv8sf2"
+@@ -1039,7 +1049,7 @@ (define_insn "<sse>_vm<code><mode>3"
+ 	 (const_int 1)))]
+   "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
+   "<maxminfprefix>s<ssemodesuffixf2c>\t{%2, %0|%0, %2}"
+-  [(set_attr "type" "sse")
++  [(set_attr "type" "sseadd")
+    (set_attr "mode" "<ssescalarmode>")])
+ 
+ ;; These versions of the min/max patterns implement exactly the operations
+@@ -1175,6 +1185,7 @@ (define_insn "sse3_addsubv2df3"
+   "TARGET_SSE3"
+   "addsubpd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "mode" "V2DF")])
+ 
+ (define_insn "avx_h<plusminus_insn>v4df3"
+@@ -1298,6 +1309,7 @@ (define_insn "sse3_h<plusminus_insn>v4sf
+   "TARGET_SSE3"
+   "h<plusminus_mnemonic>ps\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_rep" "1")
+    (set_attr "mode" "V4SF")])
+ 
+@@ -5066,6 +5078,7 @@ (define_insn "*sse2_pmaddwd"
+   "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+   "pmaddwd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "simul")
+    (set_attr "prefix_data16" "1")
+    (set_attr "mode" "TI")])
+ 
+@@ -7025,6 +7038,7 @@ (define_insn "*vec_extractv2di_1_rex64"
+    movq\t{%H1, %0|%0, %H1}
+    mov{q}\t{%H1, %0|%0, %H1}"
+   [(set_attr "type" "ssemov,sseishft,ssemov,imov")
++   (set_attr "atom_unit" "*,sishuf,*,*")
+    (set_attr "memory" "*,none,*,*")
+    (set_attr "mode" "V2SF,TI,TI,DI")])
+ 
+@@ -7057,6 +7071,7 @@ (define_insn "*vec_extractv2di_1_sse2"
+    psrldq\t{$8, %0|%0, 8}
+    movq\t{%H1, %0|%0, %H1}"
+   [(set_attr "type" "ssemov,sseishft,ssemov")
++   (set_attr "atom_unit" "*,sishuf,*")
+    (set_attr "memory" "*,none,*")
+    (set_attr "mode" "V2SF,TI,TI")])
+ 
+@@ -7614,6 +7629,7 @@ (define_insn "sse2_psadbw"
+   "TARGET_SSE2"
+   "psadbw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "simul")
+    (set_attr "prefix_data16" "1")
+    (set_attr "mode" "TI")])
+ 
+@@ -7635,7 +7651,7 @@ (define_insn "<sse>_movmskp<ssemodesuffi
+ 	  UNSPEC_MOVMSK))]
+   "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
+   "%vmovmskp<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "<MODE>")])
+ 
+@@ -7645,7 +7661,7 @@ (define_insn "sse2_pmovmskb"
+ 		   UNSPEC_MOVMSK))]
+   "TARGET_SSE2"
+   "%vpmovmskb\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "SI")])
+@@ -7668,7 +7684,7 @@ (define_insn "*sse2_maskmovdqu"
+   "TARGET_SSE2 && !TARGET_64BIT"
+   ;; @@@ check ordering of operands in intel/nonintel syntax
+   "%vmaskmovdqu\t{%2, %1|%1, %2}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "TI")])
+@@ -7682,7 +7698,7 @@ (define_insn "*sse2_maskmovdqu_rex64"
+   "TARGET_SSE2 && TARGET_64BIT"
+   ;; @@@ check ordering of operands in intel/nonintel syntax
+   "%vmaskmovdqu\t{%2, %1|%1, %2}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "TI")])
+@@ -7693,6 +7709,7 @@ (define_insn "sse_ldmxcsr"
+   "TARGET_SSE"
+   "%vldmxcsr\t%0"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "mxcsr")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "memory" "load")])
+ 
+@@ -7702,6 +7719,7 @@ (define_insn "sse_stmxcsr"
+   "TARGET_SSE"
+   "%vstmxcsr\t%0"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "mxcsr")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "memory" "store")])
+ 
+@@ -7720,6 +7738,7 @@ (define_insn "*sse_sfence"
+   "TARGET_SSE || TARGET_3DNOW_A"
+   "sfence"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "fence")
+    (set_attr "memory" "unknown")])
+ 
+ (define_insn "sse2_clflush"
+@@ -7728,6 +7747,7 @@ (define_insn "sse2_clflush"
+   "TARGET_SSE2"
+   "clflush\t%a0"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "fence")
+    (set_attr "memory" "unknown")])
+ 
+ (define_expand "sse2_mfence"
+@@ -7745,6 +7765,7 @@ (define_insn "*sse2_mfence"
+   "TARGET_64BIT || TARGET_SSE2"
+   "mfence"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "fence")
+    (set_attr "memory" "unknown")])
+ 
+ (define_expand "sse2_lfence"
+@@ -7762,6 +7783,7 @@ (define_insn "*sse2_lfence"
+   "TARGET_SSE2"
+   "lfence"
+   [(set_attr "type" "sse")
++   (set_attr "atom_sse_attr" "lfence")
+    (set_attr "memory" "unknown")])
+ 
+ (define_insn "sse3_mwait"
+@@ -7885,6 +7907,7 @@ (define_insn "ssse3_phaddwv8hi3"
+   "TARGET_SSSE3"
+   "phaddw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -7913,6 +7936,7 @@ (define_insn "ssse3_phaddwv4hi3"
+   "TARGET_SSSE3"
+   "phaddw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -7967,6 +7991,7 @@ (define_insn "ssse3_phadddv4si3"
+   "TARGET_SSSE3"
+   "phaddd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -7987,6 +8012,7 @@ (define_insn "ssse3_phadddv2si3"
+   "TARGET_SSSE3"
+   "phaddd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8073,6 +8099,7 @@ (define_insn "ssse3_phaddswv8hi3"
+   "TARGET_SSSE3"
+   "phaddsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8101,6 +8128,7 @@ (define_insn "ssse3_phaddswv4hi3"
+   "TARGET_SSSE3"
+   "phaddsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8187,6 +8215,7 @@ (define_insn "ssse3_phsubwv8hi3"
+   "TARGET_SSSE3"
+   "phsubw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8215,6 +8244,7 @@ (define_insn "ssse3_phsubwv4hi3"
+   "TARGET_SSSE3"
+   "phsubw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8269,6 +8299,7 @@ (define_insn "ssse3_phsubdv4si3"
+   "TARGET_SSSE3"
+   "phsubd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8289,6 +8320,7 @@ (define_insn "ssse3_phsubdv2si3"
+   "TARGET_SSSE3"
+   "phsubd\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8375,6 +8407,7 @@ (define_insn "ssse3_phsubswv8hi3"
+   "TARGET_SSSE3"
+   "phsubsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8403,6 +8436,7 @@ (define_insn "ssse3_phsubswv4hi3"
+   "TARGET_SSSE3"
+   "phsubsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "complex")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8509,6 +8543,7 @@ (define_insn "ssse3_pmaddubsw128"
+   "TARGET_SSSE3"
+   "pmaddubsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "simul")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8547,6 +8582,7 @@ (define_insn "ssse3_pmaddubsw"
+   "TARGET_SSSE3"
+   "pmaddubsw\t{%2, %0|%0, %2}"
+   [(set_attr "type" "sseiadd")
++   (set_attr "atom_unit" "simul")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8754,6 +8790,7 @@ (define_insn "ssse3_palignrti"
+   return "palignr\t{%3, %2, %0|%0, %2, %3}";
+ }
+   [(set_attr "type" "sseishft")
++   (set_attr "atom_unit" "sishuf")
+    (set_attr "prefix_data16" "1")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "TI")])
+@@ -8770,6 +8807,7 @@ (define_insn "ssse3_palignrdi"
+   return "palignr\t{%3, %2, %0|%0, %2, %3}";
+ }
+   [(set_attr "type" "sseishft")
++   (set_attr "atom_unit" "sishuf")
+    (set_attr "prefix_extra" "1")
+    (set_attr "mode" "DI")])
+ 
+@@ -8956,7 +8994,7 @@ (define_insn "sse4_1_movntdqa"
+ 		     UNSPEC_MOVNTDQA))]
+   "TARGET_SSE4_1"
+   "%vmovntdqa\t{%1, %0|%0, %1}"
+-  [(set_attr "type" "ssecvt")
++  [(set_attr "type" "ssemov")
+    (set_attr "prefix_extra" "1")
+    (set_attr "prefix" "maybe_vex")
+    (set_attr "mode" "TI")])
+--- gcc/config/i386/i386-c.c	(.../trunk)	(revision 144460)
++++ gcc/config/i386/i386-c.c	(.../branches/ix86/atom)	(revision 144601)
+@@ -119,6 +119,10 @@ ix86_target_macros_internal (int isa_fla
+       def_or_undef (parse_in, "__core2");
+       def_or_undef (parse_in, "__core2__");
+       break;
++    case PROCESSOR_ATOM:
++      def_or_undef (parse_in, "__atom");
++      def_or_undef (parse_in, "__atom__");
++      break;
+     /* use PROCESSOR_max to not set/unset the arch macro.  */
+     case PROCESSOR_max:
+       break;
+@@ -187,6 +191,9 @@ ix86_target_macros_internal (int isa_fla
+     case PROCESSOR_CORE2:
+       def_or_undef (parse_in, "__tune_core2__");
+       break;
++    case PROCESSOR_ATOM:
++      def_or_undef (parse_in, "__tune_atom__");
++      break;
+     case PROCESSOR_GENERIC32:
+     case PROCESSOR_GENERIC64:
+       break;
+--- gcc/config/i386/i386-protos.h	(.../trunk)	(revision 144460)
++++ gcc/config/i386/i386-protos.h	(.../branches/ix86/atom)	(revision 144601)
+@@ -85,6 +85,9 @@ extern void ix86_fixup_binary_operands_n
+ extern void ix86_expand_binary_operator (enum rtx_code,
+ 					 enum machine_mode, rtx[]);
+ extern int ix86_binary_operator_ok (enum rtx_code, enum machine_mode, rtx[]);
++extern bool ix86_lea_for_add_ok (enum rtx_code, rtx, rtx[]);
++extern bool ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn);
++extern bool ix86_agi_dependent (rtx set_insn, rtx use_insn);
+ extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode,
+ 					rtx[]);
+ extern rtx ix86_build_const_vector (enum machine_mode, bool, rtx);
+--- gcc/config/i386/i386.c	(.../trunk)	(revision 144460)
++++ gcc/config/i386/i386.c	(.../branches/ix86/atom)	(revision 144601)
+@@ -1036,6 +1036,79 @@ struct processor_costs core2_cost = {
+   1,                                    /* cond_not_taken_branch_cost.  */
+ };
+ 
++static const
++struct processor_costs atom_cost = {
++  COSTS_N_INSNS (1),			/* cost of an add instruction */
++  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
++  COSTS_N_INSNS (1),			/* variable shift costs */
++  COSTS_N_INSNS (1),			/* constant shift costs */
++  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
++   COSTS_N_INSNS (4),			/*                               HI */
++   COSTS_N_INSNS (3),			/*                               SI */
++   COSTS_N_INSNS (4),			/*                               DI */
++   COSTS_N_INSNS (2)},			/*                               other */
++  0,					/* cost of multiply per each bit set */
++  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
++   COSTS_N_INSNS (26),			/*                          HI */
++   COSTS_N_INSNS (42),			/*                          SI */
++   COSTS_N_INSNS (74),			/*                          DI */
++   COSTS_N_INSNS (74)},			/*                          other */
++  COSTS_N_INSNS (1),			/* cost of movsx */
++  COSTS_N_INSNS (1),			/* cost of movzx */
++  8,					/* "large" insn */
++  17,					/* MOVE_RATIO */
++  2,					/* cost for loading QImode using movzbl */
++  {4, 4, 4},				/* cost of loading integer registers
++					   in QImode, HImode and SImode.
++					   Relative to reg-reg move (2).  */
++  {4, 4, 4},				/* cost of storing integer registers */
++  4,					/* cost of reg,reg fld/fst */
++  {12, 12, 12},				/* cost of loading fp registers
++					   in SFmode, DFmode and XFmode */
++  {6, 6, 8},				/* cost of storing fp registers
++					   in SFmode, DFmode and XFmode */
++  2,					/* cost of moving MMX register */
++  {8, 8},				/* cost of loading MMX registers
++					   in SImode and DImode */
++  {8, 8},				/* cost of storing MMX registers
++					   in SImode and DImode */
++  2,					/* cost of moving SSE register */
++  {8, 8, 8},				/* cost of loading SSE registers
++					   in SImode, DImode and TImode */
++  {8, 8, 8},				/* cost of storing SSE registers
++					   in SImode, DImode and TImode */
++  5,					/* MMX or SSE register to integer */
++  32,					/* size of l1 cache.  */
++  256,					/* size of l2 cache.  */
++  64,					/* size of prefetch block */
++  6,					/* number of parallel prefetches */
++  3,					/* Branch cost */
++  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
++  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
++  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
++  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
++  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
++  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
++  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
++   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
++          {8192, rep_prefix_8_byte}, {-1, libcall}}}},
++  {{libcall, {{8, loop}, {15, unrolled_loop},
++          {2048, rep_prefix_4_byte}, {-1, libcall}}},
++   {libcall, {{24, loop}, {32, unrolled_loop},
++          {8192, rep_prefix_8_byte}, {-1, libcall}}}},
++  1,                                    /* scalar_stmt_cost.  */
++  1,                                    /* scalar load_cost.  */
++  1,                                    /* scalar_store_cost.  */
++  1,                                    /* vec_stmt_cost.  */
++  1,                                    /* vec_to_scalar_cost.  */
++  1,                                    /* scalar_to_vec_cost.  */
++  1,                                    /* vec_align_load_cost.  */
++  2,                                    /* vec_unalign_load_cost.  */
++  1,                                    /* vec_store_cost.  */
++  3,                                    /* cond_taken_branch_cost.  */
++  1,                                    /* cond_not_taken_branch_cost.  */
++};
++
+ /* Generic64 should produce code tuned for Nocona and K8.  */
+ static const
+ struct processor_costs generic64_cost = {
+@@ -1194,6 +1267,7 @@ const struct processor_costs *ix86_cost 
+ #define m_PENT4  (1<<PROCESSOR_PENTIUM4)
+ #define m_NOCONA  (1<<PROCESSOR_NOCONA)
+ #define m_CORE2  (1<<PROCESSOR_CORE2)
++#define m_ATOM  (1<<PROCESSOR_ATOM)
+ 
+ #define m_GEODE  (1<<PROCESSOR_GEODE)
+ #define m_K6  (1<<PROCESSOR_K6)
+@@ -1231,10 +1305,11 @@ static unsigned int initial_ix86_tune_fe
+   m_486 | m_PENT,
+ 
+   /* X86_TUNE_UNROLL_STRLEN */
+-  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,
++  m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
++  | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_DEEP_BRANCH_PREDICTION */
+-  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
++  m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
+ 
+   /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
+      on simulation result. But after P4 was made, no performance benefit
+@@ -1246,12 +1321,12 @@ static unsigned int initial_ix86_tune_fe
+   ~m_386,
+ 
+   /* X86_TUNE_USE_SAHF */
+-  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
++  m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
+   | m_NOCONA | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
+      partial dependencies.  */
+-  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
++  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
+   | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
+ 
+   /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
+@@ -1271,13 +1346,13 @@ static unsigned int initial_ix86_tune_fe
+   m_386 | m_486 | m_K6_GEODE,
+ 
+   /* X86_TUNE_USE_SIMODE_FIOP */
+-  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),
++  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
+ 
+   /* X86_TUNE_USE_MOV0 */
+   m_K6,
+ 
+   /* X86_TUNE_USE_CLTD */
+-  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),
++  ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
+ 
+   /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
+   m_PENT4,
+@@ -1292,8 +1367,8 @@ static unsigned int initial_ix86_tune_fe
+   ~(m_PENT | m_PPRO),
+ 
+   /* X86_TUNE_PROMOTE_QIMODE */
+-  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
+-  | m_GENERIC /* | m_PENT4 ? */,
++  m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
++  | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
+ 
+   /* X86_TUNE_FAST_PREFIX */
+   ~(m_PENT | m_486 | m_386),
+@@ -1317,26 +1392,28 @@ static unsigned int initial_ix86_tune_fe
+   m_PPRO,
+ 
+   /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
+-  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
++  m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
++  | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_ADD_ESP_8 */
+-  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
++  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
+   | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_SUB_ESP_4 */
+-  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
++  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
++  | m_GENERIC,
+ 
+   /* X86_TUNE_SUB_ESP_8 */
+-  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
++  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
+   | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
+      for DFmode copies */
+-  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
++  ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
+     | m_GENERIC | m_GEODE),
+ 
+   /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
+-  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
++  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
+      conflict here in between PPro/Pentium4 based chips that thread 128bit
+@@ -1347,7 +1424,8 @@ static unsigned int initial_ix86_tune_fe
+      shows that disabling this option on P4 brings over 20% SPECfp regression,
+      while enabling it on K8 brings roughly 2.4% regression that can be partly
+      masked by careful scheduling of moves.  */
+-  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,
++  m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
++  | m_AMDFAM10,
+ 
+   /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
+   m_AMDFAM10,
+@@ -1365,13 +1443,13 @@ static unsigned int initial_ix86_tune_fe
+   m_PPRO | m_PENT4 | m_NOCONA,
+ 
+   /* X86_TUNE_MEMORY_MISMATCH_STALL */
+-  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
++  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_PROLOGUE_USING_MOVE */
+-  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
++  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_EPILOGUE_USING_MOVE */
+-  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
++  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_SHIFT1 */
+   ~m_486,
+@@ -1380,29 +1458,32 @@ static unsigned int initial_ix86_tune_fe
+   m_AMD_MULTIPLE,
+ 
+   /* X86_TUNE_INTER_UNIT_MOVES */
+-  ~(m_AMD_MULTIPLE | m_GENERIC),
++  ~(m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
+ 
+   /* X86_TUNE_INTER_UNIT_CONVERSIONS */
+   ~(m_AMDFAM10),
+ 
+   /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
+      than 4 branch instructions in the 16 byte window.  */
+-  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
++  m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
++  | m_GENERIC,
+ 
+   /* X86_TUNE_SCHEDULE */
+-  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,
++  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
++  | m_GENERIC,
+ 
+   /* X86_TUNE_USE_BT */
+-  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
++  m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_USE_INCDEC */
+-  ~(m_PENT4 | m_NOCONA | m_GENERIC),
++  ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
+ 
+   /* X86_TUNE_PAD_RETURNS */
+   m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_EXT_80387_CONSTANTS */
+-  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,
++  m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
++  | m_CORE2 | m_GENERIC,
+ 
+   /* X86_TUNE_SHORTEN_X87_SSE */
+   ~m_K8,
+@@ -1447,6 +1528,10 @@ static unsigned int initial_ix86_tune_fe
+      with a subsequent conditional jump instruction into a single
+      compare-and-branch uop.  */
+   m_CORE2,
++
++  /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
++     will impact LEA instruction selection. */
++  m_ATOM,
+ };
+ 
+ /* Feature tests against the various architecture variations.  */
+@@ -1472,10 +1557,11 @@ static unsigned int initial_ix86_arch_fe
+ };
+ 
+ static const unsigned int x86_accumulate_outgoing_args
+-  = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
++  = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
++    | m_GENERIC;
+ 
+ static const unsigned int x86_arch_always_fancy_math_387
+-  = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
++  = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
+     | m_NOCONA | m_CORE2 | m_GENERIC;
+ 
+ static enum stringop_alg stringop_alg = no_stringop;
+@@ -1952,7 +2038,8 @@ static const struct ptt processor_target
+   {&core2_cost, 16, 10, 16, 10, 16},
+   {&generic32_cost, 16, 7, 16, 7, 16},
+   {&generic64_cost, 16, 10, 16, 10, 16},
+-  {&amdfam10_cost, 32, 24, 32, 7, 32}
++  {&amdfam10_cost, 32, 24, 32, 7, 32},
++  {&atom_cost, 16, 7, 16, 7, 16}
+ };
+ 
+ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
+@@ -1970,6 +2057,7 @@ static const char *const cpu_names[TARGE
+   "prescott",
+   "nocona",
+   "core2",
++  "atom",
+   "geode",
+   "k6",
+   "k6-2",
+@@ -2528,6 +2616,9 @@ override_options (bool main_args_p)
+       {"core2", PROCESSOR_CORE2, CPU_CORE2,
+ 	PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
+ 	| PTA_SSSE3 | PTA_CX16},
++      {"atom", PROCESSOR_ATOM, CPU_ATOM,
++	PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
++	| PTA_SSSE3 | PTA_CX16},
+       {"geode", PROCESSOR_GEODE, CPU_GEODE,
+ 	PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
+       {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
+@@ -12866,6 +12957,263 @@ ix86_expand_unary_operator (enum rtx_cod
+     emit_move_insn (operands[0], dst);
+ }
+ 
++#define LEA_SEARCH_THRESHOLD 12
++
++/* Search backward for a non-AGU definition of op1 or op2 in insn's
++   basic block.  Stop once LEA_SEARCH_THRESHOLD instructions have been
++   passed, a BB boundary is reached, or an AGU definition is found.
++   Returns the distance between the non-AGU definition point and insn;
++   if there is no definition point, returns -1.
++   TODO: Currently we have no way to tell whether the defining insn is
++   an LEA.  We just assume all definitions are non-LEA.  */
++static int
++distance_non_agu_define (rtx op1, rtx op2, rtx insn)
++{
++  rtx reg_op1 = REG_P (op1) ? op1 : NULL;
++  rtx reg_op2 = REG_P (op2) ? op2 : NULL;
++  basic_block bb = BLOCK_FOR_INSN (insn);
++  int distance = 0;
++
++  if (insn != BB_HEAD (bb))
++    {
++
++      rtx prev = PREV_INSN (insn);
++      while (prev && distance < LEA_SEARCH_THRESHOLD)
++	{
++	  if (INSN_P (prev))
++	    {
++	      distance++;
++	      if ((reg_op1 && reg_set_p (reg_op1, prev))
++		  || (reg_op2 && reg_set_p (reg_op2, prev)))
++		return distance ;
++	    }
++	  if (prev == BB_HEAD (bb))
++	    break;
++	  prev = PREV_INSN (prev);
++	}
++    }
++  
++  if (distance < LEA_SEARCH_THRESHOLD)
++    {
++      edge e;
++      edge_iterator ei;
++      bool simple_loop = false;
++  
++      FOR_EACH_EDGE (e, ei, bb->preds)
++	if (e->src == bb)
++	  {
++	    simple_loop = true;
++	    break;
++	  }
++  
++      if (simple_loop)
++	{
++	  rtx prev = BB_END (bb);
++	  while (prev
++		 && prev != insn
++		 && distance < LEA_SEARCH_THRESHOLD)
++	    {
++	      if (INSN_P (prev))
++		{
++		  distance++;
++		  if ((reg_op1 && reg_set_p (reg_op1, prev))
++		      || (reg_op2 && reg_set_p (reg_op2, prev)))
++		    return distance;
++		}
++	      prev = PREV_INSN (prev);
++	    }
++	}
++    }
++
++  return -1;
++}
++
++/* Return the distance between this insn and the next insn that uses
++   the result of this insn as a memory address.
++   Return -1 if no such use is found within LEA_SEARCH_THRESHOLD.  */
++static int
++distance_agu_use (rtx op0, rtx insn)
++{
++  basic_block bb = BLOCK_FOR_INSN (insn);
++  int distance = 0;
++
++  if (insn != BB_END(bb))
++    {
++      rtx next = NEXT_INSN (insn);
++
++      while (next && distance < LEA_SEARCH_THRESHOLD)
++	{
++	  if (INSN_P (next))
++	    {
++	      distance++;
++	      if (reg_mentioned_by_mem_p (op0, next))
++		return distance;
++	      if (reg_set_p (op0, next))
++		return -1;
++	    }
++	  if (next == BB_END (bb))
++	    break;
++	  next = NEXT_INSN (next);
++	}
++    }
++
++  if (distance < LEA_SEARCH_THRESHOLD)
++    {
++      edge e;
++      edge_iterator ei;
++      bool simple_loop = false;
++  
++      FOR_EACH_EDGE (e, ei, bb->succs)
++        if (e->dest == bb)
++	  {
++	    simple_loop = true;
++	    break;
++	  }
++  
++      if (simple_loop)
++	{
++	  rtx next = BB_HEAD (bb);
++	  while (next && distance < LEA_SEARCH_THRESHOLD)
++	    {
++	      if (next == insn)
++		break;
++	      if (INSN_P (next))
++		{
++		  distance++;
++		  if (reg_mentioned_by_mem_p (op0, next))
++		    return distance;
++		  if (reg_set_p (op0, next))
++		    return -1;
++		}
++	      next = NEXT_INSN (next);
++	    }
++	}
++    }  
++
++  return -1;
++}
++
++/* Define this macro to tune LEA priority vs. ADD; it takes effect when
++   there is a choice between emitting LEA or ADD.
++   Negative value: ADD is preferred over LEA
++   Zero: neutral
++   Positive value: LEA is preferred over ADD  */
++#define IX86_LEA_PRIORITY 2
++
++/* Return true if it is ok to optimize an ADD operation into an LEA
++   operation, to avoid clobbering the flags register.  For processors
++   like Atom, if the destination register of the LEA holds an actual
++   address that will be used soon, LEA is better; otherwise ADD is
++   better.  */
++
++bool
++ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
++                     rtx insn,
++                     rtx operands[])
++{
++  gcc_assert (REG_P (operands[0]));
++  gcc_assert (operands[1] && operands[2]);
++
++  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
++    {
++      if (true_regnum (operands[0]) != true_regnum (operands[1]))
++        return true;
++      else
++        return false;
++    }
++
++  /* If a = b + c with a != b and a != c, the lea form must be used. */
++  if (true_regnum (operands[0]) != true_regnum (operands[1])
++      && true_regnum (operands[0]) != true_regnum (operands[2]))
++    return true;
++  else    
++    {
++      int dist_define, dist_use;
++      dist_define = distance_non_agu_define (operands[1],
++					     operands[2], insn);
++      if (dist_define <= 0)
++        return true;
++
++      /* If this insn has both a backward non-agu dependence and a forward
++         agu dependence, the one with the shorter distance takes effect.  */
++      dist_use = distance_agu_use (operands[0], insn);
++      if (dist_use <= 0
++	  || (dist_define + IX86_LEA_PRIORITY) < dist_use)
++        return false;
++
++      return true;
++    }
++}
++
++/* Return true if destination reg of SET_INSN is shift count of
++   USE_INSN.  */
++
++bool
++ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
++{
++  rtx set_pattern = PATTERN (set_insn);
++  rtx set_dest;
++  rtx shift_rtx;
++  rtx use_pattern;
++
++  /* Retrieve destination of set_insn */
++  switch (GET_CODE (set_pattern))
++    {
++    case SET:
++      set_dest = SET_DEST (set_pattern);
++      break;
++    case PARALLEL:
++      set_pattern = XVECEXP (set_pattern, 0, 0);
++      if (GET_CODE (set_pattern) == SET)
++	{
++	  set_dest = SET_DEST (set_pattern);
++	  break;
++	}
++    default:
++      set_dest = NULL;
++      break;
++    }
++  if (!set_dest || !REG_P (set_dest))
++    return false;
++
++  /* Retrieve shift count of use_insn */
++  use_pattern = PATTERN (use_insn);
++  switch (GET_CODE (use_pattern))
++    {
++    case SET:
++      shift_rtx = XEXP (use_pattern, 1);
++      break;
++    case PARALLEL:
++      set_pattern = XVECEXP (use_pattern, 0, 0);
++      if (GET_CODE (set_pattern) == SET)
++	{
++	  shift_rtx = XEXP (set_pattern, 1);
++	  break;
++	}
++    default:
++      shift_rtx = NULL;
++      break;
++    }
++
++  if (shift_rtx 
++      && (GET_CODE (shift_rtx) == ASHIFT
++	  || GET_CODE (shift_rtx) == LSHIFTRT
++	  || GET_CODE (shift_rtx) == ASHIFTRT
++	  || GET_CODE (shift_rtx) == ROTATE
++	  || GET_CODE (shift_rtx) == ROTATERT))
++    {
++      rtx shift_count = XEXP (shift_rtx, 1);
++      gcc_assert (shift_count);
++
++      /* Return true if shift count is dest of set_insn */
++      if (REG_P (shift_count)
++	  && true_regnum (set_dest) == true_regnum (shift_count))
++	return true;
++    }
++
++  return false;
++}
++
+ /* Return TRUE or FALSE depending on whether the unary operator meets the
+    appropriate constraints.  */
+ 
+@@ -18985,6 +19333,7 @@ ix86_issue_rate (void)
+   switch (ix86_tune)
+     {
+     case PROCESSOR_PENTIUM:
++    case PROCESSOR_ATOM:
+     case PROCESSOR_K6:
+       return 2;
+ 
+@@ -19051,41 +19400,21 @@ ix86_flags_dependent (rtx insn, rtx dep_
+   return 1;
+ }
+ 
+-/* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
+-   address with operands set by DEP_INSN.  */
++/* Return true iff USE_INSN has a memory address with operands set by
++   SET_INSN.  */
+ 
+-static int
+-ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
++bool
++ix86_agi_dependent (rtx set_insn, rtx use_insn)
+ {
+-  rtx addr;
+-
+-  if (insn_type == TYPE_LEA
+-      && TARGET_PENTIUM)
+-    {
+-      addr = PATTERN (insn);
+-
+-      if (GET_CODE (addr) == PARALLEL)
+-	addr = XVECEXP (addr, 0, 0);
+-
+-      gcc_assert (GET_CODE (addr) == SET);
+-
+-      addr = SET_SRC (addr);
+-    }
+-  else
+-    {
+-      int i;
+-      extract_insn_cached (insn);
+-      for (i = recog_data.n_operands - 1; i >= 0; --i)
+-	if (MEM_P (recog_data.operand[i]))
+-	  {
+-	    addr = XEXP (recog_data.operand[i], 0);
+-	    goto found;
+-	  }
+-      return 0;
+-    found:;
+-    }
+-
+-  return modified_in_p (addr, dep_insn);
++  int i;
++  extract_insn_cached (use_insn);
++  for (i = recog_data.n_operands - 1; i >= 0; --i)
++    if (MEM_P (recog_data.operand[i]))
++      {
++	rtx addr = XEXP (recog_data.operand[i], 0);
++	return modified_in_p (addr, set_insn) != 0;
++      }
++  return false;
+ }
+ 
+ static int
+@@ -19113,8 +19442,19 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+     {
+     case PROCESSOR_PENTIUM:
+       /* Address Generation Interlock adds a cycle of latency.  */
+-      if (ix86_agi_dependent (insn, dep_insn, insn_type))
+-	cost += 1;
++      if (insn_type == TYPE_LEA)
++	{
++	  rtx addr = PATTERN (insn);
++
++	  if (GET_CODE (addr) == PARALLEL)
++	    addr = XVECEXP (addr, 0, 0);
++
++	  gcc_assert (GET_CODE (addr) == SET);
++
++	  addr = SET_SRC (addr);
++	  if (modified_in_p (addr, dep_insn))
++	    cost += 1;
++	}
+ 
+       /* ??? Compares pair with jump/setcc.  */
+       if (ix86_flags_dependent (insn, dep_insn, insn_type))
+@@ -19123,7 +19463,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+       /* Floating point stores require value to be ready one cycle earlier.  */
+       if (insn_type == TYPE_FMOV
+ 	  && get_attr_memory (insn) == MEMORY_STORE
+-	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
++	  && !ix86_agi_dependent (dep_insn, insn))
+ 	cost += 1;
+       break;
+ 
+@@ -19146,7 +19486,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+ 	 in parallel with previous instruction in case
+ 	 previous instruction is not needed to compute the address.  */
+       if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+-	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
++	  && !ix86_agi_dependent (dep_insn, insn))
+ 	{
+ 	  /* Claim moves to take one cycle, as core can issue one load
+ 	     at time and the next load can start cycle later.  */
+@@ -19175,7 +19515,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+ 	 in parallel with previous instruction in case
+ 	 previous instruction is not needed to compute the address.  */
+       if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+-	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
++	  && !ix86_agi_dependent (dep_insn, insn))
+ 	{
+ 	  /* Claim moves to take one cycle, as core can issue one load
+ 	     at time and the next load can start cycle later.  */
+@@ -19192,6 +19532,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+     case PROCESSOR_ATHLON:
+     case PROCESSOR_K8:
+     case PROCESSOR_AMDFAM10:
++    case PROCESSOR_ATOM:
+     case PROCESSOR_GENERIC32:
+     case PROCESSOR_GENERIC64:
+       memory = get_attr_memory (insn);
+@@ -19200,7 +19541,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
+ 	 in parallel with previous instruction in case
+ 	 previous instruction is not needed to compute the address.  */
+       if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+-	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
++	  && !ix86_agi_dependent (dep_insn, insn))
+ 	{
+ 	  enum attr_unit unit = get_attr_unit (insn);
+ 	  int loadcost = 3;
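
As a rough illustration (mine, not part of the patch), the new ix86_lea_for_add_ok
heuristic draws roughly the following distinction when tuning for Atom; the function
names below are invented for the example, and the actual insn chosen still depends
on register allocation and the distance thresholds above:

    /* Illustration only.  With a gcc built from this patch, compile
       with e.g. -march=atom -O2 (the patch adds "atom" to the
       -march/-mtune tables).  */

    long sum (long a, long b)
    {
      /* The result is not used as an address soon, so plain ADD is
         preferred: no extra LEA latency on the AGU side.  */
      return a + b;
    }

    long load (long *base, long idx)
    {
      /* The addition feeds an address calculation within
         LEA_SEARCH_THRESHOLD insns, so LEA is preferred: the value
         stays on the AGU side and the EX->AGEN stall modeled by the
         define_bypass entries above is avoided.  */
      return base[idx + 8];
    }
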
diff --git a/gcc44-c++-builtin-redecl.patch b/gcc44-c++-builtin-redecl.patch
index 77655ad..1f36f1f 100644
--- a/gcc44-c++-builtin-redecl.patch
+++ b/gcc44-c++-builtin-redecl.patch
@@ -9,7 +9,7 @@
 
 --- gcc/cp/decl.c.jj	2007-10-01 22:11:09.000000000 +0200
 +++ gcc/cp/decl.c	2007-10-02 11:39:46.000000000 +0200
-@@ -1988,23 +1988,21 @@ duplicate_decls (tree newdecl, tree oldd
+@@ -2001,23 +2001,21 @@ duplicate_decls (tree newdecl, tree oldd
  	  DECL_ARGUMENTS (olddecl) = DECL_ARGUMENTS (newdecl);
  	  DECL_RESULT (olddecl) = DECL_RESULT (newdecl);
  	}
@@ -40,8 +40,8 @@
 -	    }
 -
  	  DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
- 	  /* Don't clear out the arguments if we're redefining a function.  */
- 	  if (DECL_ARGUMENTS (olddecl))
+ 	  /* Don't clear out the arguments if we're just redeclaring a
+ 	     function.  */
 --- gcc/testsuite/gcc.dg/builtins-65.c.jj	2007-10-02 11:23:51.000000000 +0200
 +++ gcc/testsuite/gcc.dg/builtins-65.c	2007-10-02 11:24:12.000000000 +0200
 @@ -0,0 +1,25 @@
diff --git a/gcc44-cloog-dl.patch b/gcc44-cloog-dl.patch
index 6599cf8..cb8cbc7 100644
--- a/gcc44-cloog-dl.patch
+++ b/gcc44-cloog-dl.patch
@@ -1,6 +1,5 @@
 2009-01-27  Jakub Jelinek  <jakub at redhat.com>
 
-	* toplev.c (save_argv): No longer static.
 	* Makefile.in (BACKENDLIBS): Link against -ldl instead of -lcloog -lppl.
 	(graphite.o): Force -O, remove -fkeep-inline-functions.
 	* graphite.c: Include <dlfcn.h>.  Reference libcloog and libppl symbols
@@ -9,17 +8,6 @@
 	(gcc_type_for_iv_of_clast_loop): Rename stmt_for argument to stmt_fora.
 	(graphite_transform_loops): Call init_cloog_pointers.
 
---- gcc/toplev.c.jj	2008-12-09 23:59:10.000000000 +0100
-+++ gcc/toplev.c	2009-01-27 14:33:52.000000000 +0100
-@@ -128,7 +128,7 @@ static bool no_backend;
- const char *progname;
- 
- /* Copy of argument vector to toplev_main.  */
--static const char **save_argv;
-+const char **save_argv;
- 
- /* Name of top-level original source file (what was input to cpp).
-    This comes from the #-command at the beginning of the actual input.
 --- gcc/Makefile.in.jj	2009-01-26 20:50:38.000000000 +0100
 +++ gcc/Makefile.in	2009-01-27 14:18:10.000000000 +0100
 @@ -915,7 +915,7 @@ BUILD_LIBDEPS= $(BUILD_LIBIBERTY)
@@ -43,7 +31,7 @@
  	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ mips-tfile.o version.o $(LIBS)
 --- gcc/graphite.c.jj	2009-01-24 19:59:02.000000000 +0100
 +++ gcc/graphite.c	2009-01-27 14:52:08.000000000 +0100
-@@ -59,6 +59,138 @@ along with GCC; see the file COPYING3.  
+@@ -59,6 +59,110 @@ along with GCC; see the file COPYING3.  
  #include "cloog/cloog.h"
  #include "graphite.h"
  
@@ -128,39 +116,11 @@
 +static bool
 +init_cloog_pointers (void)
 +{
-+  void *h = NULL;
-+  extern const char **save_argv;
-+  char *buf, *p;
-+  size_t len;
++  void *h;
 +
 +  if (cloog_pointers.inited)
 +    return cloog_pointers.h != NULL;
-+  len = progname - save_argv[0];
-+  buf = XALLOCAVAR (char, len + sizeof "libcloog.so.0");
-+  memcpy (buf, save_argv[0], len);
-+  strcpy (buf + len, "libcloog.so.0");
-+  len += sizeof "libcloog.so.0";
-+  p = strstr (buf, "/libexec/");
-+  if (p != NULL)
-+    {
-+      while (1)
-+	{
-+	  char *q = strstr (p + 8, "/libexec/");
-+	  if (q == NULL)
-+	    break;
-+	  p = q;
-+	}
-+      memmove (p + 4, p + 8, len - (p + 8 - buf));
-+      h = dlopen (buf, RTLD_LAZY);
-+      if (h == NULL)
-+	{
-+	  len = progname - save_argv[0];
-+	  memcpy (buf, save_argv[0], len);
-+	  strcpy (buf + len, "libcloog.so.0");
-+	}
-+    }
-+  if (h == NULL)
-+    h = dlopen (buf, RTLD_LAZY);
++  h = dlopen ("libcloog.so.0", RTLD_LAZY);
 +  cloog_pointers.h = h;
 +  if (h == NULL)
 +    return false;
diff --git a/gcc44-memmove-opt.patch b/gcc44-memmove-opt.patch
new file mode 100644
index 0000000..e35ade2
--- /dev/null
+++ b/gcc44-memmove-opt.patch
@@ -0,0 +1,128 @@
+2009-03-18  Jakub Jelinek  <jakub at redhat.com>
+
+	* builtins.c (fold_builtin_memory_op): Optimize memmove
+	into memcpy if we can prove source and destination don't overlap.
+
+	* gcc.dg/memmove-2.c: New test.
+	* gcc.dg/memmove-3.c: New test.
+
+--- gcc/builtins.c.jj	2009-03-04 20:06:31.000000000 +0100
++++ gcc/builtins.c	2009-03-18 18:19:28.000000000 +0100
+@@ -8882,17 +8882,74 @@ fold_builtin_memory_op (tree dest, tree 
+ 	     really mandatory?
+ 
+ 	     If either SRC is readonly or length is 1, we can use memcpy.  */
+-	  if (dest_align && src_align
+-	      && (readonly_data_expr (src)
+-	          || (host_integerp (len, 1)
+-		      && (MIN (src_align, dest_align) / BITS_PER_UNIT >=
+-			  tree_low_cst (len, 1)))))
++	  if (!dest_align || !src_align)
++	    return NULL_TREE;
++	  if (readonly_data_expr (src)
++	      || (host_integerp (len, 1)
++		  && (MIN (src_align, dest_align) / BITS_PER_UNIT >=
++		      tree_low_cst (len, 1))))
+ 	    {
+ 	      tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ 	      if (!fn)
+ 		return NULL_TREE;
+               return build_call_expr (fn, 3, dest, src, len);
+ 	    }
++
++	  /* If *src and *dest can't overlap, optimize into memcpy as well.  */
++	  srcvar = build_fold_indirect_ref (src);
++	  destvar = build_fold_indirect_ref (dest);
++	  if (srcvar && !TREE_THIS_VOLATILE (srcvar)
++	      && destvar && !TREE_THIS_VOLATILE (destvar))
++	    {
++	      tree src_base, dest_base, fn;
++	      HOST_WIDE_INT src_offset = 0, dest_offset = 0;
++	      HOST_WIDE_INT size = -1;
++	      HOST_WIDE_INT maxsize = -1;
++
++	      src_base = srcvar;
++	      if (handled_component_p (src_base))
++		src_base = get_ref_base_and_extent (src_base, &src_offset,
++						    &size, &maxsize);
++	      dest_base = destvar;
++	      if (handled_component_p (dest_base))
++		dest_base = get_ref_base_and_extent (dest_base, &dest_offset,
++						     &size, &maxsize);
++	      if (host_integerp (len, 1))
++		{
++		  maxsize = tree_low_cst (len, 1);
++		  if (maxsize
++		      > INTTYPE_MAXIMUM (HOST_WIDE_INT) / BITS_PER_UNIT)
++		    maxsize = -1;
++		  else
++		    maxsize *= BITS_PER_UNIT;
++		}
++	      else
++		maxsize = -1;
++	      if (SSA_VAR_P (src_base)
++		  && SSA_VAR_P (dest_base))
++		{
++		  if (operand_equal_p (src_base, dest_base, 0)
++		      && ranges_overlap_p (src_offset, maxsize,
++					   dest_offset, maxsize))
++		    return NULL_TREE;
++		}
++	      else if (TREE_CODE (src_base) == INDIRECT_REF
++		       && TREE_CODE (dest_base) == INDIRECT_REF)
++		{
++		  if (! operand_equal_p (TREE_OPERAND (src_base, 0),
++					 TREE_OPERAND (dest_base, 0), 0)
++		      || ranges_overlap_p (src_offset, maxsize,
++					   dest_offset, maxsize))
++		    return NULL_TREE;
++		}
++	      else
++		return NULL_TREE;
++
++	      fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
++	      if (!fn)
++		return NULL_TREE;
++	      return build_call_expr (fn, 3, dest, src, len);
++	    }
+ 	  return NULL_TREE;
+ 	}
+ 
+--- gcc/testsuite/gcc.dg/memmove-2.c.jj	2009-03-18 18:30:17.000000000 +0100
++++ gcc/testsuite/gcc.dg/memmove-2.c	2009-03-18 18:30:49.000000000 +0100
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fdump-tree-optimized" } */
++/* { dg-final { scan-tree-dump-times "memmove" 0 "optimized" } } */
++/* { dg-final { cleanup-tree-dump "optimized" } } */
++
++char a[40];
++extern void bar (char *);
++
++void
++foo (void)
++{
++  char b[10];
++  __builtin_memmove (&a[0], &a[20], 20);
++  __builtin_memmove (&b[1], &a[25], 9);
++  bar (b);
++}
+--- gcc/testsuite/gcc.dg/memmove-3.c.jj	2009-03-18 18:30:19.000000000 +0100
++++ gcc/testsuite/gcc.dg/memmove-3.c	2009-03-18 18:31:01.000000000 +0100
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fdump-tree-optimized" } */
++/* { dg-final { scan-tree-dump-times "memmove" 3 "optimized" } } */
++/* { dg-final { cleanup-tree-dump "optimized" } } */
++
++char a[40];
++struct A { char a[30]; };
++
++void
++foo (struct A *p, char *q, char *r)
++{
++  char b[10];
++  __builtin_memmove (&a[1], &a[19], 20);
++  __builtin_memmove (&p->a[1], &p->a[9], 10);
++  __builtin_memmove (q, r, 9);
++}
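
The two tests above already pin down the behavior; as a minimal sketch (mine, not
from the patch), for same-base moves the new fold comes down to the
ranges_overlap_p check:

    /* Sketch only: the first call may now be folded to memcpy, since
       the source and destination ranges are provably disjoint; the
       second must remain memmove because the ranges overlap.  */
    char buf[64];

    void copy_parts (void)
    {
      __builtin_memmove (buf, buf + 32, 16);  /* [32,48) -> [0,16): disjoint */
      __builtin_memmove (buf + 8, buf, 16);   /* [0,16) -> [8,24): overlap */
    }
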
diff --git a/gcc44-power7-2.patch b/gcc44-power7-2.patch
new file mode 100644
index 0000000..519f8b9
--- /dev/null
+++ b/gcc44-power7-2.patch
@@ -0,0 +1,1369 @@
+2009-03-13  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	PR target/39457
+	* config/rs6000/rs6000.opt (-mdisallow-float-in-lr-ctr): Add
+	temporary debug switch.
+
+	* config/rs6000/rs6000.c (rs6000_hard_regno_mode_ok): Revert
+	behavior of disallowing 
+
+2009-03-13  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vector.md (vec_extract_evenv2df): Delete; the insn
+	causes problems in building SPEC 2006.
+	(vec_extract_oddv2df): Ditto.
+	(vec_pack_trunc_v2df): New expanders for VSX vectorized
+	conversions.
+	(vec_pack_sfix_trunc_v2df): Ditto.
+	(vec_pack_ufix_trunc_v2df): Ditto.
+	(vec_unpacks_hi_v4sf): Ditto.
+	(vec_unpacks_lo_v4sf): Ditto.
+	(vec_unpacks_float_hi_v4si): Ditto.
+	(vec_unpacks_float_lo_v4si): Ditto.
+	(vec_unpacku_float_hi_v4si): Ditto.
+	(vec_unpacku_float_lo_v4si): Ditto.
+
+	* config/rs6000/rs6000-protos.h (rs6000_vector_secondary_reload):
+	Declaration for new target hook.
+
+	* config/rs6000/rs6000.c (TARGET_SECONDARY_RELOAD): Add new target
+	hook for eventually fixing up the memory references for Altivec
+	and VSX reloads to be reg+reg instead of reg+offset.  Right now,
+	this is a stub function that prints debug information if
+	-mdebug=addr and then calls default_secondary_reload.
+	(rs6000_secondary_reload): Ditto.
+	(rs6000_vector_secondary_reload): Ditto.
+	(rs6000_builtin_conversion): Add support for V2DI/V2DF
+	conversions.
+	(rs6000_legitimate_offset_address_p): Test for the vector unit
+	doing the memory references.
+	(rs6000_legimize_reload_address): Ditto.
+	(rs6000_legitimize_address): Print extra \n if -mdebug=addr.
+	(rs6000_legitimize_reload_address): Ditto.
+	(rs6000_legitimate_address): Ditto.
+	(rs6000_mode_dependent_address): Ditto.
+	(bdesc_2arg): Add VSX builtins.
+	(bdesc_abs): Ditto.
+	(bdesc_1arg): Ditto.
+	(altivec_init_builtins): Ditto.
+	(rs6000_secondary_memory_needed_rtx): Add debug support if
+	-mdebug=addr.
+	(rs6000_preferred_reload_class): Ditto.
+	(rs6000_secondary_memory_needed): Ditto.
+	(rs6000_secondary_reload_class): Ditto.
+	(rs6000_cannot_change_mode_class): Ditto.
+
+	* config/rs6000/vsx.md (UNSPEC_VSX_*): Add unspecs for VSX
+	conversions.
+	(vsx_nabs<mode>): Add generator function.
+	(vsx_float<VSi><mode>2): Ditto.
+	(vsx_floatuns<VSi><mode>2): Ditto.
+	(vsx_xxmrghw): Ditto.
+	(vsx_xxmrglw): Ditto.
+	(vsx_xvcvdpsp): New VSX vector conversion insn.
+	(vsx_xvcvdpsxws): Ditto.
+	(vsx_xvcvdpuxws): Ditto.
+	(vsx_xvcvspdp): Ditto.
+	(vsx_xvcvsxwdp): Ditto.
+	(vsx_xvcvuxwdp): Ditto.
+	(vsx_reload_*): New insns for reload support.
+
+	* config/rs6000/rs6000.h: Fix a comment.
+
+	* config/rs6000/altivec.md (altivec_reload_*): New insns for
+	reload support.
+
+	* config/rs6000/rs6000.md (ptrsize): New mode attribute for the
+	pointer size.
+
+2009-03-10  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vsx.md (vsx_concat_v2df): Add explicit 'f'
+	register class for scalar data, correct uses of the xxpermdi
+	instruction.
+	(vsx_set_v2df): Ditto.
+	(vsx_extract_v2df): Ditto.
+	(vsx_xxpermdi): Ditto.
+	(vsx_splatv2df): Ditto.
+	(vsx_xxmrghw): Use wf instead of v constraints.
+	(vsx_xxmrglw): Ditto.
+testsuite/
+2009-03-13  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	PR target/39457
+	* gcc.target/powerpc/pr39457.c: New test for PR39457.
+
+2009-03-13  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* gcc.target/powerpc/vsx-builtin-1.c: New test for builtins.
+	* gcc.target/powerpc/vsx-builtin-2.c: Ditto.
+
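
Before the vector.md hunks below, a scalar sketch of what two of the new
standard-named expanders compute may help; this is my illustration of the
documented pattern semantics, not code from the patch, with element order
shown as on big-endian PowerPC:

    /* Illustration only.  vec_pack_trunc_v2df narrows two 2 x double
       vectors into one 4 x float vector; vec_unpacks_hi_v4sf widens
       the high half of a 4 x float vector to 2 x double.  */
    void pack_trunc_v2df (float out[4], const double a[2], const double b[2])
    {
      out[0] = (float) a[0];  out[1] = (float) a[1];
      out[2] = (float) b[0];  out[3] = (float) b[1];
    }

    void unpacks_hi_v4sf (double out[2], const float in[4])
    {
      out[0] = in[0];  /* "high" half = elements 0 and 1 on BE */
      out[1] = in[1];
    }
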
+--- gcc/config/rs6000/vector.md	(revision 144758)
++++ gcc/config/rs6000/vector.md	(revision 144843)
+@@ -496,23 +496,122 @@ (define_expand "vec_interleave_lowv2df"
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+   "")
+ 
+-;; For 2 element vectors, even/odd is the same as high/low
+-(define_expand "vec_extract_evenv2df"
+-  [(set (match_operand:V2DF 0 "vfloat_operand" "")
+-	(vec_concat:V2DF
+-	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
+-			(parallel [(const_int 0)]))
+-	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
+-			(parallel [(const_int 0)]))))]
+-  "VECTOR_UNIT_VSX_P (V2DFmode)"
+-  "")
++
++;; Convert double word types to single word types
++(define_expand "vec_pack_trunc_v2df"
++  [(match_operand:V4SF 0 "vsx_register_operand" "")
++   (match_operand:V2DF 1 "vsx_register_operand" "")
++   (match_operand:V2DF 2 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
++{
++  rtx r1 = gen_reg_rtx (V4SFmode);
++  rtx r2 = gen_reg_rtx (V4SFmode);
+ 
+-(define_expand "vec_extract_oddv2df"
+-  [(set (match_operand:V2DF 0 "vfloat_operand" "")
+-	(vec_concat:V2DF
+-	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
+-			(parallel [(const_int 1)]))
+-	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
+-			(parallel [(const_int 1)]))))]
+-  "VECTOR_UNIT_VSX_P (V2DFmode)"
+-  "")
++  emit_insn (gen_vsx_xvcvdpsp (r1, operands[1]));
++  emit_insn (gen_vsx_xvcvdpsp (r2, operands[2]));
++  emit_insn (gen_vec_extract_evenv4sf (operands[0], r1, r2));
++  DONE;
++})
++
++(define_expand "vec_pack_sfix_trunc_v2df"
++  [(match_operand:V4SI 0 "vsx_register_operand" "")
++   (match_operand:V2DF 1 "vsx_register_operand" "")
++   (match_operand:V2DF 2 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
++{
++  rtx r1 = gen_reg_rtx (V4SImode);
++  rtx r2 = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vsx_xvcvdpsxws (r1, operands[1]));
++  emit_insn (gen_vsx_xvcvdpsxws (r2, operands[2]));
++  emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2));
++  DONE;
++})
++
++(define_expand "vec_pack_ufix_trunc_v2df"
++  [(match_operand:V4SI 0 "vsx_register_operand" "")
++   (match_operand:V2DF 1 "vsx_register_operand" "")
++   (match_operand:V2DF 2 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
++{
++  rtx r1 = gen_reg_rtx (V4SImode);
++  rtx r2 = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vsx_xvcvdpuxws (r1, operands[1]));
++  emit_insn (gen_vsx_xvcvdpuxws (r2, operands[2]));
++  emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2));
++  DONE;
++})
++
++;; Convert single word types to double word
++(define_expand "vec_unpacks_hi_v4sf"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SF 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
++{
++  rtx reg = gen_reg_rtx (V4SFmode);
++
++  emit_insn (gen_vec_interleave_highv4sf (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvspdp (operands[0], reg));
++  DONE;
++})
++
++(define_expand "vec_unpacks_lo_v4sf"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SF 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
++{
++  rtx reg = gen_reg_rtx (V4SFmode);
++
++  emit_insn (gen_vec_interleave_lowv4sf (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvspdp (operands[0], reg));
++  DONE;
++})
++
++(define_expand "vec_unpacks_float_hi_v4si"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SI 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
++{
++  rtx reg = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg));
++  DONE;
++})
++
++(define_expand "vec_unpacks_float_lo_v4si"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SI 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
++{
++  rtx reg = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg));
++  DONE;
++})
++
++(define_expand "vec_unpacku_float_hi_v4si"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SI 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
++{
++  rtx reg = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg));
++  DONE;
++})
++
++(define_expand "vec_unpacku_float_lo_v4si"
++  [(match_operand:V2DF 0 "vsx_register_operand" "")
++   (match_operand:V4SI 1 "vsx_register_operand" "")]
++  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
++{
++  rtx reg = gen_reg_rtx (V4SImode);
++
++  emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1]));
++  emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg));
++  DONE;
++})
+--- gcc/config/rs6000/rs6000-protos.h	(revision 144758)
++++ gcc/config/rs6000/rs6000-protos.h	(revision 144843)
+@@ -72,6 +72,7 @@ extern bool rs6000_secondary_memory_need
+ extern bool rs6000_cannot_change_mode_class (enum machine_mode,
+ 					     enum machine_mode,
+ 					     enum reg_class);
++extern void rs6000_vector_secondary_reload (rtx, rtx, rtx, bool);
+ extern int paired_emit_vector_cond_expr (rtx, rtx, rtx,
+                                          rtx, rtx, rtx);
+ extern void paired_expand_vector_move (rtx operands[]);
+--- gcc/config/rs6000/rs6000.opt	(revision 144845)
++++ gcc/config/rs6000/rs6000.opt	(revision 144857)
+@@ -139,6 +139,9 @@ mvsx-scalar-memory
+ Target Report Var(TARGET_VSX_SCALAR_MEMORY)
+ If -mvsx, use VSX scalar memory reference instructions for scalar double (off by default)
+ 
++mdisallow-float-in-lr-ctr
++Target Undocumented Var(TARGET_DISALLOW_FLOAT_IN_LR_CTR) Init(-1)
++
+ mupdate
+ Target Report Var(TARGET_UPDATE) Init(1)
+ Generate load/store with update instructions
+--- gcc/config/rs6000/rs6000.c	(revision 144758)
++++ gcc/config/rs6000/rs6000.c	(revision 144843)
+@@ -1004,6 +1004,10 @@ static rtx rs6000_emit_vector_compare (e
+ 				       enum machine_mode);
+ static tree rs6000_stack_protect_fail (void);
+ 
++static enum reg_class rs6000_secondary_reload (bool, rtx, enum reg_class,
++					       enum machine_mode,
++					       struct secondary_reload_info *);
++
+ const int INSN_NOT_AVAILABLE = -1;
+ static enum machine_mode rs6000_eh_return_filter_mode (void);
+ 
+@@ -1333,6 +1337,9 @@ static const char alt_reg_names[][8] =
+ #undef TARGET_INSTANTIATE_DECLS
+ #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
+ 
++#undef TARGET_SECONDARY_RELOAD
++#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
+ /* Return number of consecutive hard regs needed starting at reg REGNO
+@@ -1448,10 +1448,16 @@ rs6000_hard_regno_mode_ok (int regno, en
+   if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+     return 1;
+ 
+-  /* Don't allow anything but word sized integers (aka pointers) in CTR/LR.  You
+-     really don't want to spill your floating point values to those
+-     registers.  Also do it for the old MQ register in the power.  */
+-  if (regno == CTR_REGNO || regno == LR_REGNO || regno == MQ_REGNO)
++  /* Don't allow anything but word sized integers (aka pointers) in CTR/LR.
++     You really don't want to spill your floating point values to those
++     registers.  Also do it for the old MQ register in the power.
++
++     While this is desirable in theory, disabling float to go in LR/CTR does
++     cause some regressions, so until they are taken care of, revert to the old
++     behavior by default for most power systems, but enable it for power7.  */
++  if ((TARGET_DISALLOW_FLOAT_IN_LR_CTR > 0
++       || (TARGET_DISALLOW_FLOAT_IN_LR_CTR < 0 && TARGET_VSX))
++      && (regno == CTR_REGNO || regno == LR_REGNO || regno == MQ_REGNO))
+     return (GET_MODE_CLASS (mode) == MODE_INT
+ 	    && GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
+ 
+@@ -2447,6 +2454,14 @@ rs6000_builtin_conversion (enum tree_cod
+     case FIX_TRUNC_EXPR:
+       switch (TYPE_MODE (type))
+ 	{
++	case V2DImode:
++	  if (!VECTOR_UNIT_VSX_P (V2DFmode))
++	    return NULL_TREE;
++
++	  return TYPE_UNSIGNED (type)
++	    ? rs6000_builtin_decls[VSX_BUILTIN_XVCVDPUXDS]
++	    : rs6000_builtin_decls[VSX_BUILTIN_XVCVDPSXDS];
++
+ 	case V4SImode:
+ 	  if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
+ 	    return NULL_TREE;
+@@ -2462,6 +2477,14 @@ rs6000_builtin_conversion (enum tree_cod
+     case FLOAT_EXPR:
+       switch (TYPE_MODE (type))
+ 	{
++	case V2DImode:
++	  if (!VECTOR_UNIT_VSX_P (V2DFmode))
++	    return NULL_TREE;
++
++	  return TYPE_UNSIGNED (type)
++	    ? rs6000_builtin_decls[VSX_BUILTIN_XVCVUXDSP]
++	    : rs6000_builtin_decls[VSX_BUILTIN_XVCVSXDSP];
++
+ 	case V4SImode:
+ 	  if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
+ 	    return NULL_TREE;
+@@ -2469,6 +2492,7 @@ rs6000_builtin_conversion (enum tree_cod
+ 	  return TYPE_UNSIGNED (type)
+ 	    ? rs6000_builtin_decls[VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF]
+ 	    : rs6000_builtin_decls[VECTOR_BUILTIN_FLOAT_V4SI_V4SF];
++
+ 	default:
+ 	  return NULL_TREE;
+ 	}
+@@ -4101,7 +4125,7 @@ rs6000_legitimate_offset_address_p (enum
+     case V2DImode:
+       /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid and
+ 	 constant offset zero should not occur due to canonicalization.  */
+-      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode))
++      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
+ 	return false;
+       break;
+ 
+@@ -4441,6 +4465,7 @@ rs6000_legitimize_address (rtx x, rtx ol
+ 	}
+       else
+ 	fprintf (stderr, "NULL returned\n");
++      fprintf (stderr, "\n");
+     }
+ 
+   return ret;
+@@ -4776,8 +4801,7 @@ rs6000_legitimize_reload_address (rtx x,
+ 	   && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+ 	   && GET_CODE (XEXP (x, 1)) == CONST_INT
+ 	   && (INTVAL (XEXP (x, 1)) & 3) != 0
+-	   && !ALTIVEC_VECTOR_MODE (mode)
+-	   && !VSX_VECTOR_MODE (mode)
++	   && VECTOR_MEM_NONE_P (mode)
+ 	   && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
+ 	   && TARGET_POWERPC64)
+     {
+@@ -4798,8 +4822,7 @@ rs6000_legitimize_reload_address (rtx x,
+ 	   && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ 				       || mode == DDmode || mode == TDmode
+ 				       || mode == DImode))
+-	   && !ALTIVEC_VECTOR_MODE (mode)
+-	   && !VSX_VECTOR_MODE (mode))
++	   && VECTOR_MEM_NONE_P (mode))
+     {
+       HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+       HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+@@ -4843,6 +4866,7 @@ rs6000_legitimize_reload_address (rtx x,
+ 	   /* Don't do this for TFmode or TDmode, since the result isn't
+ 	      offsettable.  The same goes for DImode without 64-bit gprs and
+ 	      DFmode and DDmode without fprs.  */
++	   && VECTOR_MEM_NONE_P (mode)
+ 	   && mode != TFmode
+ 	   && mode != TDmode
+ 	   && (mode != DImode || TARGET_POWERPC64)
+@@ -4918,6 +4942,8 @@ rs6000_legitimize_reload_address (rtx x,
+ 	  fprintf (stderr, "New address:\n");
+ 	  debug_rtx (ret);
+ 	}
++
++      fprintf (stderr, "\n");
+     }
+ 
+   return ret;
+@@ -5035,6 +5061,7 @@ rs6000_legitimate_address (enum machine_
+ 	       GET_MODE_NAME (mode),
+ 	       reg_ok_strict);
+       debug_rtx (orig_x);
++      fprintf (stderr, "\n");
+     }
+ 
+   return ret;
+@@ -5082,9 +5109,10 @@ rs6000_mode_dependent_address (rtx addr)
+   if (TARGET_DEBUG_ADDR)
+     {
+       fprintf (stderr,
+-	       "\nrs6000_mode_dependent_address: ret = %d\n",
+-	       (int)ret);
++	       "\nrs6000_mode_dependent_address: ret = %s\n",
++	       ret ? "true" : "false");
+       debug_rtx (addr);
++      fprintf (stderr, "\n");
+     }
+ 
+   return ret;
+@@ -7917,6 +7945,20 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
+   { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
+ 
++  { MASK_VSX, CODE_FOR_addv2df3, "__builtin_vsx_xvadddp", VSX_BUILTIN_XVADDDP },
++  { MASK_VSX, CODE_FOR_subv2df3, "__builtin_vsx_xvsubdp", VSX_BUILTIN_XVSUBDP },
++  { MASK_VSX, CODE_FOR_mulv2df3, "__builtin_vsx_xvmuldp", VSX_BUILTIN_XVMULDP },
++  { MASK_VSX, CODE_FOR_divv2df3, "__builtin_vsx_xvdivdp", VSX_BUILTIN_XVDIVDP },
++  { MASK_VSX, CODE_FOR_sminv2df3, "__builtin_vsx_xvmindp", VSX_BUILTIN_XVMINDP },
++  { MASK_VSX, CODE_FOR_smaxv2df3, "__builtin_vsx_xvmaxdp", VSX_BUILTIN_XVMAXDP },
++
++  { MASK_VSX, CODE_FOR_addv4sf3, "__builtin_vsx_xvaddsp", VSX_BUILTIN_XVADDSP },
++  { MASK_VSX, CODE_FOR_subv4sf3, "__builtin_vsx_xvsubsp", VSX_BUILTIN_XVSUBSP },
++  { MASK_VSX, CODE_FOR_mulv4sf3, "__builtin_vsx_xvmulsp", VSX_BUILTIN_XVMULSP },
++  { MASK_VSX, CODE_FOR_divv4sf3, "__builtin_vsx_xvdivsp", VSX_BUILTIN_XVDIVSP },
++  { MASK_VSX, CODE_FOR_sminv4sf3, "__builtin_vsx_xvminsp", VSX_BUILTIN_XVMINSP },
++  { MASK_VSX, CODE_FOR_smaxv4sf3, "__builtin_vsx_xvmaxsp", VSX_BUILTIN_XVMAXSP },
++
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
+@@ -8288,7 +8330,11 @@ static const struct builtin_description 
+   { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
++  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI },
++  { MASK_VSX, CODE_FOR_absv2df2, "__builtin_vsx_xvabsdp", VSX_BUILTIN_XVABSDP },
++  { MASK_VSX, CODE_FOR_vsx_nabsv2df2, "__builtin_vsx_xvnabsdp", VSX_BUILTIN_XVNABSDP },
++  { MASK_VSX, CODE_FOR_absv4sf2, "__builtin_vsx_xvabssp", VSX_BUILTIN_XVABSSP },
++  { MASK_VSX, CODE_FOR_vsx_nabsv4sf2, "__builtin_vsx_xvnabssp", VSX_BUILTIN_XVNABSSP },
+ };
+ 
+ /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
+@@ -8314,6 +8360,11 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
+ 
++  { MASK_VSX, CODE_FOR_negv2df2, "__builtin_vsx_xvnegdp", VSX_BUILTIN_XVNEGDP },
++  { MASK_VSX, CODE_FOR_sqrtv2df2, "__builtin_vsx_xvsqrtdp", VSX_BUILTIN_XVSQRTDP },
++  { MASK_VSX, CODE_FOR_negv4sf2, "__builtin_vsx_xvnegsp", VSX_BUILTIN_XVNEGSP },
++  { MASK_VSX, CODE_FOR_sqrtv4sf2, "__builtin_vsx_xvsqrtsp", VSX_BUILTIN_XVSQRTSP },
++
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
+@@ -8339,6 +8390,15 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fix_truncv4sfv4si2, "__builtin_vec_fix_sfsi", VECTOR_BUILTIN_FIX_V4SF_V4SI },
+   { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fixuns_truncv4sfv4si2, "__builtin_vec_fixuns_sfsi", VECTOR_BUILTIN_FIXUNS_V4SF_V4SI },
+ 
++  { MASK_VSX, CODE_FOR_floatv2div2df2, "__builtin_vsx_xvcvsxddp", VSX_BUILTIN_XVCVSXDDP },
++  { MASK_VSX, CODE_FOR_unsigned_floatv2div2df2, "__builtin_vsx_xvcvuxddp", VSX_BUILTIN_XVCVUXDDP },
++  { MASK_VSX, CODE_FOR_fix_truncv2dfv2di2, "__builtin_vsx_xvdpsxds", VSX_BUILTIN_XVCVDPSXDS },
++  { MASK_VSX, CODE_FOR_fixuns_truncv2dfv2di2, "__builtin_vsx_xvdpuxds", VSX_BUILTIN_XVCVDPUXDS },
++  { MASK_VSX, CODE_FOR_floatv4siv4sf2, "__builtin_vsx_xvcvsxwsp", VSX_BUILTIN_XVCVSXDSP },
++  { MASK_VSX, CODE_FOR_unsigned_floatv4siv4sf2, "__builtin_vsx_xvcvuxwsp", VSX_BUILTIN_XVCVUXWSP },
++  { MASK_VSX, CODE_FOR_fix_truncv4sfv4si2, "__builtin_vsx_xvspsxws", VSX_BUILTIN_XVCVSPSXWS },
++  { MASK_VSX, CODE_FOR_fixuns_truncv4sfv4si2, "__builtin_vsx_xvspuxws", VSX_BUILTIN_XVCVSPUXWS },
++
+   /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
+      end with SPE_BUILTIN_EVSUBFUSIAAW.  */
+   { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
+@@ -10484,6 +10544,8 @@ altivec_init_builtins (void)
+     = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
+   tree v4sf_ftype_v4sf
+     = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
++  tree v2df_ftype_v2df
++    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
+   tree void_ftype_pcvoid_int_int
+     = build_function_type_list (void_type_node,
+ 				pcvoid_type_node, integer_type_node,
+@@ -10641,6 +10703,9 @@ altivec_init_builtins (void)
+ 	case V4SFmode:
+ 	  type = v4sf_ftype_v4sf;
+ 	  break;
++	case V2DFmode:
++	  type = v2df_ftype_v2df;
++	  break;
+ 	default:
+ 	  gcc_unreachable ();
+ 	}
+@@ -10960,6 +11025,18 @@ rs6000_common_init_builtins (void)
+   tree int_ftype_v8hi_v8hi
+     = build_function_type_list (integer_type_node,
+ 				V8HI_type_node, V8HI_type_node, NULL_TREE);
++  tree v2di_ftype_v2df
++    = build_function_type_list (V2DI_type_node,
++				V2DF_type_node, NULL_TREE);
++  tree v2df_ftype_v2df
++    = build_function_type_list (V2DF_type_node,
++				V2DF_type_node, NULL_TREE);
++  tree v2df_ftype_v2di
++    = build_function_type_list (V2DF_type_node,
++				V2DI_type_node, NULL_TREE);
++  tree v2df_ftype_v2df_v2df
++    = build_function_type_list (V2DF_type_node,
++				V2DF_type_node, V2DF_type_node, NULL_TREE);
+   tree v2df_ftype_v2df_v2df_v2df
+     = build_function_type_list (V2DF_type_node,
+ 				V2DF_type_node, V2DF_type_node,
+@@ -11136,6 +11213,9 @@ rs6000_common_init_builtins (void)
+ 	    case VOIDmode:
+ 	      type = opaque_ftype_opaque_opaque;
+ 	      break;
++	    case V2DFmode:
++	      type = v2df_ftype_v2df_v2df;
++	      break;
+ 	    case V4SFmode:
+ 	      type = v4sf_ftype_v4sf_v4sf;
+ 	      break;
+@@ -11285,6 +11365,8 @@ rs6000_common_init_builtins (void)
+ 	type = v16qi_ftype_int;
+       else if (mode0 == VOIDmode && mode1 == VOIDmode)
+ 	type = opaque_ftype_opaque;
++      else if (mode0 == V2DFmode && mode1 == V2DFmode)
++	type = v2df_ftype_v2df;
+       else if (mode0 == V4SFmode && mode1 == V4SFmode)
+ 	type = v4sf_ftype_v4sf;
+       else if (mode0 == V8HImode && mode1 == V16QImode)
+@@ -11310,6 +11392,10 @@ rs6000_common_init_builtins (void)
+ 	type = v4si_ftype_v4sf;
+       else if (mode0 == V4SFmode && mode1 == V4SImode)
+ 	type = v4sf_ftype_v4si;
++      else if (mode0 == V2DImode && mode1 == V2DFmode)
++	type = v2di_ftype_v2df;
++      else if (mode0 == V2DFmode && mode1 == V2DImode)
++	type = v2df_ftype_v2di;
+       else
+ 	gcc_unreachable ();
+ 
+@@ -12092,8 +12178,10 @@ rtx
+ rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
+ {
+   static bool eliminated = false;
++  rtx ret;
++
+   if (mode != SDmode)
+-    return assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
++    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+   else
+     {
+       rtx mem = cfun->machine->sdmode_stack_slot;
+@@ -12105,8 +12193,21 @@ rs6000_secondary_memory_needed_rtx (enum
+ 	  cfun->machine->sdmode_stack_slot = mem;
+ 	  eliminated = true;
+ 	}
+-      return mem;
++      ret = mem;
++    }
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr, "rs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
++	       GET_MODE_NAME (mode));
++      if (!ret)
++	fprintf (stderr, "\tNULL_RTX\n");
++      else
++	debug_rtx (ret);
++      fprintf (stderr, "\n");
+     }
++
++  return ret;
+ }
+ 
+ static tree
+@@ -12140,6 +12241,54 @@ rs6000_check_sdmode (tree *tp, int *walk
+   return NULL_TREE;
+ }
+ 
++/* Inform reload about cases where moving X with a mode MODE to a register in
++   RCLASS requires an extra scratch or immediate register.  Return the class
++   needed for the immediate register.  */
++
++static enum reg_class
++rs6000_secondary_reload (bool in_p,
++			 rtx x,
++			 enum reg_class rclass,
++			 enum machine_mode mode,
++			 secondary_reload_info *sri)
++{
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "rs6000_secondary_reload, in_p = %s, rclass = %s, mode = %s\n",
++	       in_p ? "true" : "false", reg_class_names[rclass],
++	       GET_MODE_NAME (mode));
++      debug_rtx (x);
++      fprintf (stderr, "\n");
++    }
++
++  return default_secondary_reload (in_p, x, rclass, mode, sri);
++}
++
++/* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
++   to SP+reg addressing.  */
++
++void
++rs6000_vector_secondary_reload (rtx op0, rtx op1, rtx op2, bool to_mem_p)
++{
++  rtx memref = to_mem_p ? op0 : op1;
++  gcc_assert (MEM_P (memref));
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr, "rs6000_vector_secondary_reload, to_mem_p = %s\n",
++	       to_mem_p ? "true" : "false");
++      fprintf (stderr, "op0:\n");
++      debug_rtx (op0);
++      fprintf (stderr, "op1:\n");
++      debug_rtx (op1);
++      fprintf (stderr, "op2:\n");
++      debug_rtx (op2);
++      fprintf (stderr, "\n");
++    }
++
++  gcc_unreachable ();
++}
+ 
+ /* Allocate a 64-bit stack slot to be used for copying SDmode
+    values through if this function has any SDmode references.  */
+@@ -12212,32 +12361,44 @@ enum reg_class
+ rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
+ {
+   enum machine_mode mode = GET_MODE (x);
++  enum reg_class ret;
+ 
+   if (TARGET_VSX && VSX_VECTOR_MODE (mode) && x == CONST0_RTX (mode)
+       && VSX_REG_CLASS_P (rclass))
+-    return rclass;
++    ret = rclass;
+ 
+-  if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode) && rclass == ALTIVEC_REGS
+-      && easy_vector_constant (x, mode))
+-    return rclass;
++  else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode)
++	   && rclass == ALTIVEC_REGS && easy_vector_constant (x, mode))
++    ret = rclass;
+ 
+-  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
+-    return NO_REGS;
++  else if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
++    ret = NO_REGS;
+ 
+-  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
+-    return GENERAL_REGS;
++  else if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
++    ret = GENERAL_REGS;
+ 
+   /* For VSX, prefer the traditional registers.  */
+-  if (rclass == VSX_REGS)
++  else if (rclass == VSX_REGS)
+     {
+       if (mode == DFmode)
+-	return FLOAT_REGS;
++	ret = FLOAT_REGS;
+ 
+       if (ALTIVEC_VECTOR_MODE (mode))
+-	return ALTIVEC_REGS;
++	ret = ALTIVEC_REGS;
++    }
++  else
++    ret = rclass;
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "rs6000_preferred_reload_class, return %s, rclass = %s, x:\n",
++	       reg_class_names[ret], reg_class_names[rclass]);
++      debug_rtx (x);
++      fprintf (stderr, "\n");
+     }
+ 
+-  return rclass;
++  return ret;
+ }
+ 
+ /* If we are copying between FP or AltiVec registers and anything else, we need
+@@ -12251,31 +12412,46 @@ rs6000_secondary_memory_needed (enum reg
+ 				enum reg_class class2,
+ 				enum machine_mode mode)
+ {
++  bool ret;
++  bool vsx1;
++  bool vsx2;
++
+   if (class1 == class2)
+-    return false;
++    ret = false;
+ 
+-  if (TARGET_VSX && VSX_MOVE_MODE (mode) && VSX_REG_CLASS_P (class1)
+-      && VSX_REG_CLASS_P (class2))
+-    return false;
++  else if (TARGET_VSX && VECTOR_MEM_VSX_P (mode)
++	   && ((vsx1 = VSX_REG_CLASS_P (class1))
++	       || (vsx2 = VSX_REG_CLASS_P (class2))))
++    ret = (vsx1 != vsx2);
++
++  else if (class1 == FLOAT_REGS
++	   && (!TARGET_MFPGPR || !TARGET_POWERPC64
++	       || ((mode != DFmode)
++		   && (mode != DDmode)
++		   && (mode != DImode))))
++    ret = true;
++
++  else if (class2 == FLOAT_REGS
++	   && (!TARGET_MFPGPR || !TARGET_POWERPC64
++	       || ((mode != DFmode)
++		   && (mode != DDmode)
++		   && (mode != DImode))))
++    ret = true;
+ 
+-  if (class1 == FLOAT_REGS
+-       && (!TARGET_MFPGPR || !TARGET_POWERPC64
+-	   || ((mode != DFmode)
+-	       && (mode != DDmode)
+-	       && (mode != DImode))))
+-    return true;
++  else if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
++    ret = true;
+ 
+-  if (class2 == FLOAT_REGS
+-      && (!TARGET_MFPGPR || !TARGET_POWERPC64
+-	  || ((mode != DFmode)
+-	      && (mode != DDmode)
+-	      && (mode != DImode))))
+-    return true;
++  else
++    ret = false;
+ 
+-  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
+-    return true;
++  if (TARGET_DEBUG_ADDR)
++    fprintf (stderr,
++	     "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
++	     "class2 = %s, mode = %s\n",
++	     ret ? "true" : "false", reg_class_names[class1],
++	     reg_class_names[class2], GET_MODE_NAME (mode));
+ 
+-  return false;
++  return ret;
+ }
+ 
+ /* Return the register class of a scratch register needed to copy IN into
+@@ -12287,6 +12463,7 @@ rs6000_secondary_reload_class (enum reg_
+ 			       enum machine_mode mode,
+ 			       rtx in)
+ {
++  enum reg_class ret = NO_REGS;
+   int regno;
+ 
+   if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
+@@ -12307,58 +12484,75 @@ rs6000_secondary_reload_class (enum reg_
+ 	      || GET_CODE (in) == HIGH
+ 	      || GET_CODE (in) == LABEL_REF
+ 	      || GET_CODE (in) == CONST))
+-	return BASE_REGS;
++	ret = BASE_REGS;
+     }
+ 
+-  if (GET_CODE (in) == REG)
++  if (ret == NO_REGS)
+     {
+-      regno = REGNO (in);
+-      if (regno >= FIRST_PSEUDO_REGISTER)
++      if (GET_CODE (in) == REG)
++	{
++	  regno = REGNO (in);
++	  if (regno >= FIRST_PSEUDO_REGISTER)
++	    {
++	      regno = true_regnum (in);
++	      if (regno >= FIRST_PSEUDO_REGISTER)
++		regno = -1;
++	    }
++	}
++      else if (GET_CODE (in) == SUBREG)
+ 	{
+ 	  regno = true_regnum (in);
+ 	  if (regno >= FIRST_PSEUDO_REGISTER)
+ 	    regno = -1;
+ 	}
+-    }
+-  else if (GET_CODE (in) == SUBREG)
+-    {
+-      regno = true_regnum (in);
+-      if (regno >= FIRST_PSEUDO_REGISTER)
++      else
+ 	regno = -1;
+-    }
+-  else
+-    regno = -1;
+ 
+-  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
+-     into anything.  */
+-  if (rclass == GENERAL_REGS || rclass == BASE_REGS
+-      || (regno >= 0 && INT_REGNO_P (regno)))
+-    return NO_REGS;
+-
+-  /* Constants, memory, and FP registers can go into FP registers.  */
+-  if ((regno == -1 || FP_REGNO_P (regno))
+-      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
+-    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
+-
+-  /* Memory, and FP/altivec registers can go into fp/altivec registers under
+-     VSX.  */
+-  if (TARGET_VSX
+-      && (regno == -1 || VSX_REGNO_P (regno))
+-      && VSX_REG_CLASS_P (rclass))
+-    return NO_REGS;
++      /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
++	 into anything.  */
++      if (rclass == GENERAL_REGS || rclass == BASE_REGS
++	  || (regno >= 0 && INT_REGNO_P (regno)))
++	ret = NO_REGS;
++
++      /* Constants, memory, and FP registers can go into FP registers.  */
++      else if ((regno == -1 || FP_REGNO_P (regno))
++	       && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
++	ret = (mode != SDmode) ? NO_REGS : GENERAL_REGS;
++
++      /* Memory, and FP/altivec registers can go into fp/altivec registers under
++	 VSX.  */
++      else if (TARGET_VSX
++	       && (regno == -1 || VSX_REGNO_P (regno))
++	       && VSX_REG_CLASS_P (rclass))
++	ret = NO_REGS;
++
++      /* Memory, and AltiVec registers can go into AltiVec registers.  */
++      else if ((regno == -1 || ALTIVEC_REGNO_P (regno))
++	       && rclass == ALTIVEC_REGS)
++	ret = NO_REGS;
++
++      /* We can copy among the CR registers.  */
++      else if ((rclass == CR_REGS || rclass == CR0_REGS)
++	       && regno >= 0 && CR_REGNO_P (regno))
++	ret = NO_REGS;
++
++      /* Otherwise, we need GENERAL_REGS.  */
++      else
++	ret = GENERAL_REGS;
++    }
+ 
+-  /* Memory, and AltiVec registers can go into AltiVec registers.  */
+-  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
+-      && rclass == ALTIVEC_REGS)
+-    return NO_REGS;
+-
+-  /* We can copy among the CR registers.  */
+-  if ((rclass == CR_REGS || rclass == CR0_REGS)
+-      && regno >= 0 && CR_REGNO_P (regno))
+-    return NO_REGS;
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "rs6000_secondary_reload_class, return %s, rclass = %s, "
++	       "mode = %s, input rtx:\n",
++	       reg_class_names[ret], reg_class_names[rclass],
++	       GET_MODE_NAME (mode));
++      debug_rtx (in);
++      fprintf (stderr, "\n");
++    }
+ 
+-  /* Otherwise, we need GENERAL_REGS.  */
+-  return GENERAL_REGS;
++  return ret;
+ }
+ 
+ /* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */
+@@ -12368,19 +12562,29 @@ rs6000_cannot_change_mode_class (enum ma
+ 				 enum machine_mode to,
+ 				 enum reg_class rclass)
+ {
+-  return (GET_MODE_SIZE (from) != GET_MODE_SIZE (to)
+-	  ? ((GET_MODE_SIZE (from) < 8 || GET_MODE_SIZE (to) < 8
+-	      || TARGET_IEEEQUAD)
+-	     && reg_classes_intersect_p (FLOAT_REGS, rclass))
+-	  : (((TARGET_E500_DOUBLE
+-	       && ((((to) == DFmode) + ((from) == DFmode)) == 1
+-		   || (((to) == TFmode) + ((from) == TFmode)) == 1
+-		   || (((to) == DDmode) + ((from) == DDmode)) == 1
+-		   || (((to) == TDmode) + ((from) == TDmode)) == 1
+-		   || (((to) == DImode) + ((from) == DImode)) == 1))
+-	      || (TARGET_SPE
+-		  && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1))
+-	     && reg_classes_intersect_p (GENERAL_REGS, rclass)));
++  bool ret = (GET_MODE_SIZE (from) != GET_MODE_SIZE (to)
++	      ? ((GET_MODE_SIZE (from) < 8 || GET_MODE_SIZE (to) < 8
++		  || TARGET_IEEEQUAD)
++		 && reg_classes_intersect_p (FLOAT_REGS, rclass))
++	      : (((TARGET_E500_DOUBLE
++		   && ((((to) == DFmode) + ((from) == DFmode)) == 1
++		       || (((to) == TFmode) + ((from) == TFmode)) == 1
++		       || (((to) == DDmode) + ((from) == DDmode)) == 1
++		       || (((to) == TDmode) + ((from) == TDmode)) == 1
++		       || (((to) == DImode) + ((from) == DImode)) == 1))
++		  || (TARGET_SPE
++		      && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1))
++		 && reg_classes_intersect_p (GENERAL_REGS, rclass)));
++
++  if (TARGET_DEBUG_ADDR)
++    fprintf (stderr,
++	     "rs6000_cannot_change_mode_class, return %s, from = %s, "
++	     "to = %s, rclass = %s\n",
++	     ret ? "true" : "false",
++	     GET_MODE_NAME (from), GET_MODE_NAME (to),
++	     reg_class_names[rclass]);
++
++  return ret;
+ }
+ 
+ /* Given a comparison operation, return the bit number in CCR to test.  We
+--- gcc/config/rs6000/vsx.md	(revision 144758)
++++ gcc/config/rs6000/vsx.md	(revision 144843)
+@@ -68,7 +68,13 @@ (define_mode_attr VSbit [(SI "32")
+ 			 (DI "64")])
+ 
+ (define_constants
+-  [(UNSPEC_VSX_CONCAT_V2DF	500)])
++  [(UNSPEC_VSX_CONCAT_V2DF	500)
++   (UNSPEC_VSX_XVCVDPSP		501)
++   (UNSPEC_VSX_XVCVDPSXWS	502)
++   (UNSPEC_VSX_XVCVDPUXWS	503)
++   (UNSPEC_VSX_XVCVSPDP		504)
++   (UNSPEC_VSX_XVCVSXWDP	505)
++   (UNSPEC_VSX_XVCVUXWDP	506)])
+ 
+ ;; VSX moves
+ (define_insn "*vsx_mov<mode>"
+@@ -245,7 +251,7 @@ (define_insn "*vsx_abs<mode>2"
+   "xvabs<VSs> %x0,%x1"
+   [(set_attr "type" "vecfloat")])
+ 
+-(define_insn "*vsx_nabs<mode>2"
++(define_insn "vsx_nabs<mode>2"
+   [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
+         (neg:VSX_F
+ 	 (abs:VSX_F
+@@ -417,14 +423,14 @@ (define_insn "*vsx_ftrunc<mode>2"
+   "xvr<VSs>piz %x0,%x1"
+   [(set_attr "type" "vecperm")])
+ 
+-(define_insn "*vsx_float<VSi><mode>2"
++(define_insn "vsx_float<VSi><mode>2"
+   [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
+ 	(float:VSX_F (match_operand:<VSI> 1 "vsx_register_operand" "<VSr>")))]
+   "VECTOR_UNIT_VSX_P (<MODE>mode)"
+   "xvcvsx<VSc><VSs> %x0,%x1"
+   [(set_attr "type" "vecfloat")])
+ 
+-(define_insn "*vsx_floatuns<VSi><mode>2"
++(define_insn "vsx_floatuns<VSi><mode>2"
+   [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
+ 	(unsigned_float:VSX_F (match_operand:<VSI> 1 "vsx_register_operand" "<VSr>")))]
+   "VECTOR_UNIT_VSX_P (<MODE>mode)"
+@@ -446,6 +452,62 @@ (define_insn "*vsx_fixuns_trunc<mode><VS
+   [(set_attr "type" "vecfloat")])
+ 
+ 
++;; VSX convert to/from double vector
++
++;; Convert from 64-bit to 32-bit types
++;; Note, favor the Altivec registers since the usual use of these instructions
++;; is in vector converts and we need to use the Altivec vperm instruction.
++
++(define_insn "vsx_xvcvdpsp"
++  [(set (match_operand:V4SF 0 "vsx_register_operand" "=v,?wa")
++	(unspec:V4SF [(match_operand:V2DF 1 "vsx_register_operand" "wd,wa")]
++		     UNSPEC_VSX_XVCVDPSP))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvdpsp %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "vsx_xvcvdpsxws"
++  [(set (match_operand:V4SI 0 "vsx_register_operand" "=v,?wa")
++	(unspec:V4SI [(match_operand:V2DF 1 "vsx_register_operand" "wd,wa")]
++		     UNSPEC_VSX_XVCVDPSXWS))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvdpsxws %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "vsx_xvcvdpuxws"
++  [(set (match_operand:V4SI 0 "vsx_register_operand" "=v,?wa")
++	(unspec:V4SI [(match_operand:V2DF 1 "vsx_register_operand" "wd,wa")]
++		     UNSPEC_VSX_XVCVDPUXWS))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvdpuxws %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++;; Convert from 32-bit to 64-bit types
++(define_insn "vsx_xvcvspdp"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
++	(unspec:V2DF [(match_operand:V4SF 1 "vsx_register_operand" "wf,wa")]
++		     UNSPEC_VSX_XVCVSPDP))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvspdp %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "vsx_xvcvsxwdp"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
++	(unspec:V2DF [(match_operand:V4SI 1 "vsx_register_operand" "wf,wa")]
++		     UNSPEC_VSX_XVCVSXWDP))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvsxwdp %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "vsx_xvcvuxwdp"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
++	(unspec:V2DF [(match_operand:V4SI 1 "vsx_register_operand" "wf,wa")]
++		     UNSPEC_VSX_XVCVUXWDP))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xvcvuxwdp %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++
+ ;; VSX scalar double precision floating point operations
+ (define_insn"*vsx_adddf3"
+   [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
+@@ -753,8 +815,8 @@ (define_insn "*vsx_andc<mode>3"
+ (define_insn "vsx_concat_v2df"
+   [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
+ 	(unspec:V2DF
+-	 [(match_operand:DF 1 "vsx_register_operand" "f,wa")
+-	  (match_operand:DF 2 "vsx_register_operand" "f,wa")]
++	 [(match_operand:DF 1 "vsx_register_operand" "ws,wa")
++	  (match_operand:DF 2 "vsx_register_operand" "ws,wa")]
+ 	 UNSPEC_VSX_CONCAT_V2DF))]
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+   "xxpermdi %x0,%x1,%x2,0"
+@@ -762,32 +824,37 @@ (define_insn "vsx_concat_v2df"
+ 
+ ;; Set a double into one element
+ (define_insn "vsx_set_v2df"
+-  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd")
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
+ 	(vec_merge:V2DF
+-	 (match_operand:V2DF 1 "vsx_register_operand" "wd")
+-	 (vec_duplicate:V2DF (match_operand:DF 2 "vsx_register_operand" "ws"))
+-	 (match_operand:QI 3 "u5bit_cint_operand" "i")))]
++	 (match_operand:V2DF 1 "vsx_register_operand" "wd,wa")
++	 (vec_duplicate:V2DF (match_operand:DF 2 "vsx_register_operand" "ws,f"))
++	 (match_operand:QI 3 "u5bit_cint_operand" "i,i")))]
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+ {
+-  operands[3] = GEN_INT (INTVAL (operands[3]) & 1);
+-  return \"xxpermdi %x0,%x1,%x2,%3\";
++  if (INTVAL (operands[3]) == 0)
++    return \"xxpermdi %x0,%x1,%x2,1\";
++  else if (INTVAL (operands[3]) == 1)
++    return \"xxpermdi %x0,%x2,%x1,0\";
++  else
++    gcc_unreachable ();
+ }
+   [(set_attr "type" "vecperm")])
+ 
+ ;; Extract a DF element from V2DF
+ (define_insn "vsx_extract_v2df"
+-  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
+-	(vec_select:DF (match_operand:V2DF 1 "vsx_register_operand" "wd")
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,f,?wa")
++	(vec_select:DF (match_operand:V2DF 1 "vsx_register_operand" "wd,wd,wa")
+ 		       (parallel
+-			[(match_operand:QI 2 "u5bit_cint_operand" "i")])))]
++			[(match_operand:QI 2 "u5bit_cint_operand" "i,i,i")])))]
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+ {
+-  operands[3] = GEN_INT (INTVAL (operands[2]) & 1);
++  gcc_assert (UINTVAL (operands[2]) <= 1);
++  operands[3] = GEN_INT (INTVAL (operands[2]) << 1);
+   return \"xxpermdi %x0,%x1,%x1,%3\";
+ }
+   [(set_attr "type" "vecperm")])
+ 
+-;; General V2DF permute
++;; General V2DF permute, extract_{high,low,even,odd}
+ (define_insn "vsx_xxpermdi"
+   [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd")
+ 	(vec_concat:V2DF
+@@ -799,6 +866,7 @@ (define_insn "vsx_xxpermdi"
+ 			 [(match_operand:QI 4 "u5bit_cint_operand" "i")]))))]
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+ {
++  gcc_assert ((UINTVAL (operands[2]) <= 1) && (UINTVAL (operands[4]) <= 1));
+   operands[5] = GEN_INT (((INTVAL (operands[2]) & 1) << 1)
+ 			 | (INTVAL (operands[4]) & 1));
+   return \"xxpermdi %x0,%x1,%x3,%5\";
+@@ -807,14 +875,15 @@ (define_insn "vsx_xxpermdi"
+ 
+ ;; V2DF splat
+ (define_insn "vsx_splatv2df"
+-  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,wd")
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,wd,wd")
+ 	(vec_duplicate:V2DF
+-	 (match_operand:DF 1 "input_operand" "ws,Z")))]
++	 (match_operand:DF 1 "input_operand" "ws,f,Z")))]
+   "VECTOR_UNIT_VSX_P (V2DFmode)"
+   "@
+    xxpermdi %x0,%x1,%x1,0
++   xxpermdi %x0,%x1,%x1,0
+    lxvdsx %x0,%y1"
+-  [(set_attr "type" "vecperm,vecload")])
++  [(set_attr "type" "vecperm,vecperm,vecload")])
+ 
+ ;; V4SF splat
+ (define_insn "*vsx_xxspltw"
+@@ -828,14 +897,14 @@ (define_insn "*vsx_xxspltw"
+   [(set_attr "type" "vecperm")])
+ 
+ ;; V4SF interleave
+-(define_insn "*vsx_xxmrghw"
+-  [(set (match_operand:V4SF 0 "register_operand" "=v")
+-        (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
++(define_insn "vsx_xxmrghw"
++  [(set (match_operand:V4SF 0 "register_operand" "=wf")
++        (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "wf")
+                                          (parallel [(const_int 0)
+                                                     (const_int 2)
+                                                     (const_int 1)
+                                                     (const_int 3)]))
+-                        (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
++                        (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "wf")
+                                          (parallel [(const_int 2)
+                                                     (const_int 0)
+                                                     (const_int 3)
+@@ -845,15 +914,15 @@ (define_insn "*vsx_xxmrghw"
+   "xxmrghw %x0,%x1,%x2"
+   [(set_attr "type" "vecperm")])
+ 
+-(define_insn "*vsx_xxmrglw"
+-  [(set (match_operand:V4SF 0 "register_operand" "=v")
++(define_insn "vsx_xxmrglw"
++  [(set (match_operand:V4SF 0 "register_operand" "=wf")
+         (vec_merge:V4SF
+-	 (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
++	 (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "wf")
+ 			  (parallel [(const_int 2)
+ 				     (const_int 0)
+ 				     (const_int 3)
+ 				     (const_int 1)]))
+-	 (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
++	 (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "wf")
+ 			  (parallel [(const_int 0)
+ 				     (const_int 2)
+ 				     (const_int 1)
+@@ -862,3 +931,26 @@ (define_insn "*vsx_xxmrglw"
+   "VECTOR_UNIT_VSX_P (V4SFmode)"
+   "xxmrglw %x0,%x1,%x2"
+   [(set_attr "type" "vecperm")])
++
++
++;; Reload patterns for VSX loads/stores.  We need a scratch register to convert
++;; the stack temporary address from reg+offset to reg+reg addressing.
++(define_expand "vsx_reload_<VSX_L:mode>_<P:ptrsize>_to_mem"
++  [(parallel [(match_operand:VSX_L 0 "memory_operand" "")
++              (match_operand:VSX_L 1 "register_operand" "=wa")
++              (match_operand:P 2 "register_operand" "=&b")])]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++{
++  rs6000_vector_secondary_reload (operands[0], operands[1], operands[2], true);
++  DONE;
++})
++
++(define_expand "vsx_reload_<VSX_L:mode>_<P:ptrsize>_to_reg"
++  [(parallel [(match_operand:VSX_L 0 "register_operand" "=wa")
++              (match_operand:VSX_L 1 "memory_operand" "")
++              (match_operand:P 2 "register_operand" "=&b")])]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++{
++  rs6000_vector_secondary_reload (operands[0], operands[1], operands[2], false);
++  DONE;
++})
+--- gcc/config/rs6000/rs6000.h	(revision 144758)
++++ gcc/config/rs6000/rs6000.h	(revision 144843)
+@@ -3388,7 +3388,7 @@ enum rs6000_builtins
+   VSX_BUILTIN_XXSPLTW,
+   VSX_BUILTIN_XXSWAPD,
+ 
+-  /* Combine VSX/Altivec builtins.  */
++  /* Combined VSX/Altivec builtins.  */
+   VECTOR_BUILTIN_FLOAT_V4SI_V4SF,
+   VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF,
+   VECTOR_BUILTIN_FIX_V4SF_V4SI,
+--- gcc/config/rs6000/altivec.md	(revision 144758)
++++ gcc/config/rs6000/altivec.md	(revision 144843)
+@@ -2685,3 +2685,27 @@ (define_expand "vec_unpacku_float_lo_v8h
+   emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
+   DONE;
+ }")
++
++
++;; Reload patterns for Altivec loads/stores.  We need a scratch register to
++;; convert the stack temporary address from reg+offset to reg+reg addressing.
++
++(define_expand "altivec_reload_<V:mode>_<P:ptrsize>_to_mem"
++  [(parallel [(match_operand:V 0 "memory_operand" "")
++              (match_operand:V 1 "register_operand" "=v")
++              (match_operand:P 2 "register_operand" "=&b")])]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
++{
++  rs6000_vector_secondary_reload (operands[0], operands[1], operands[2], true);
++  DONE;
++})
++
++(define_expand "altivec_reload_<V:mode>_<P:ptrsize>_to_reg"
++  [(parallel [(match_operand:V 0 "register_operand" "=v")
++              (match_operand:V 1 "memory_operand" "")
++              (match_operand:P 2 "register_operand" "=&b")])]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
++{
++  rs6000_vector_secondary_reload (operands[0], operands[1], operands[2], false);
++  DONE;
++})
+--- gcc/config/rs6000/rs6000.md	(revision 144758)
++++ gcc/config/rs6000/rs6000.md	(revision 144843)
+@@ -222,6 +222,10 @@ (define_mode_attr dbits [(QI "56") (HI "
+ ;; ISEL/ISEL64 target selection
+ (define_mode_attr sel [(SI "") (DI "64")])
+ 
++;; Suffix for reload patterns
++(define_mode_attr ptrsize [(SI "32bit")
++			   (DI "64bit")])
++
+ 
+ ;; Start with fixed-point load and store insns.  Here we put only the more
+ ;; complex forms.  Basic data transfer is done later.
+--- gcc/testsuite/gcc.target/powerpc/vsx-builtin-2.c	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-builtin-2.c	(revision 144843)
+@@ -0,0 +1,29 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -mcpu=power7" } */
++/* { dg-final { scan-assembler "xvaddsp" } } */
++/* { dg-final { scan-assembler "xvsubsp" } } */
++/* { dg-final { scan-assembler "xvmulsp" } } */
++/* { dg-final { scan-assembler "xvdivsp" } } */
++/* { dg-final { scan-assembler "xvmaxsp" } } */
++/* { dg-final { scan-assembler "xvminsp" } } */
++/* { dg-final { scan-assembler "xvsqrtsp" } } */
++/* { dg-final { scan-assembler "xvabssp" } } */
++/* { dg-final { scan-assembler "xvnabssp" } } */
++
++void use_builtins (__vector float *p, __vector float *q, __vector float *r)
++{
++  __vector float tmp1 = *q;
++  __vector float tmp2 = *r;
++
++  *p++ = __builtin_vsx_xvaddsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvsubsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvmulsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvdivsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvmaxsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvminsp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvabssp (tmp1);
++  *p++ = __builtin_vsx_xvnabssp (tmp1);
++  *p   = __builtin_vsx_xvsqrtsp (tmp1);
++}
+--- gcc/testsuite/gcc.target/powerpc/vsx-builtin-1.c	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-builtin-1.c	(revision 144843)
+@@ -0,0 +1,29 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -mcpu=power7" } */
++/* { dg-final { scan-assembler "xvadddp" } } */
++/* { dg-final { scan-assembler "xvsubdp" } } */
++/* { dg-final { scan-assembler "xvmuldp" } } */
++/* { dg-final { scan-assembler "xvdivdp" } } */
++/* { dg-final { scan-assembler "xvmaxdp" } } */
++/* { dg-final { scan-assembler "xvmindp" } } */
++/* { dg-final { scan-assembler "xvsqrtdp" } } */
++/* { dg-final { scan-assembler "xvabsdp" } } */
++/* { dg-final { scan-assembler "xvnabsdp" } } */
++
++void use_builtins (__vector double *p, __vector double *q, __vector double *r)
++{
++  __vector double tmp1 = *q;
++  __vector double tmp2 = *r;
++
++  *p++ = __builtin_vsx_xvadddp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvsubdp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvmuldp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvdivdp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvmaxdp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvmindp (tmp1, tmp2);
++  *p++ = __builtin_vsx_xvabsdp (tmp1);
++  *p++ = __builtin_vsx_xvnabsdp (tmp1);
++  *p   = __builtin_vsx_xvsqrtdp (tmp1);
++}
+--- gcc/testsuite/gcc.target/powerpc/pr39457.c	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/pr39457.c	(revision 144857)
+@@ -0,0 +1,56 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-options "-m64 -O2 -mminimal-toc" } */
++
++/* PR 39457 -- fix breakage because the compiler ran out of registers and
++   wanted to stash a floating point value to the LR/CTR register.  */
++
++/* -O2 -m64 -mminimal-toc */
++typedef struct { void *s; } S;
++typedef void (*T1) (void);
++typedef void (*T2) (void *, void *, int, void *);
++char *fn1 (const char *, ...);
++void *fn2 (void);
++int fn3 (char *, int);
++int fn4 (const void *);
++int fn5 (const void *);
++long fn6 (void) __attribute__ ((__const__));
++int fn7 (void *, void *, void *);
++void *fn8 (void *, long);
++void *fn9 (void *, long, const char *, ...);
++void *fn10 (void *);
++long fn11 (void) __attribute__ ((__const__));
++long fn12 (void *, const char *, T1, T2, void *);
++void *fn13 (void *);
++long fn14 (void) __attribute__ ((__const__));
++extern void *v1;
++extern char *v2;
++extern int v3;
++
++void
++foo (void *x, char *z)
++{
++  void *i1, *i2;
++  int y;
++  if (v1)
++    return;
++  v1 = fn9 (fn10 (fn2 ()), fn6 (), "x", 0., "y", 0., 0);
++  y = 520 - (520 - fn4 (x)) / 2;
++  fn9 (fn8 (v1, fn6 ()), fn6 (), "wig", fn8 (v1, fn14 ()), "x", 18.0,
++       "y", 16.0, "wid", 80.0, "hi", 500.0, 0);
++  fn9 (fn10 (v1), fn6 (), "x1", 0., "y1", 0., "x2", 80.0, "y2",
++       500.0, "f", fn3 ("fff", 0x0D0DFA00), 0);
++  fn13 (((S *) fn8 (v1, fn6 ()))->s);
++  fn12 (fn8 (v1, fn11 ()), "ev", (T1) fn7, 0, fn8 (v1, fn6 ()));
++  fn9 (fn8 (v1, fn6 ()), fn6 (), "wig",
++       fn8 (v1, fn14 ()), "x", 111.0, "y", 14.0, "wid", 774.0, "hi",
++       500.0, 0);
++  v1 = fn9 (fn10 (v1), fn6 (), "x1", 0., "y1", 0., "x2", 774.0, "y2",
++            500.0, "f", fn3 ("gc", 0x0D0DFA00), 0);
++  fn1 (z, 0);
++  i1 = fn9 (fn8 (v1, fn6 ()), fn6 (), "pixbuf", x, "x",
++            800 - fn5 (x) / 2, "y", y - fn4 (x), 0);
++  fn12 (fn8 (i1, fn11 ()), "ev", (T1) fn7, 0, "/ok/");
++  fn12 (fn8 (i1, fn11 ()), "ev", (T1) fn7, 0, 0);
++  i2 = fn9 (fn8 (v1, fn6 ()), fn6 (), "txt", "OK", "fnt", v2, "x",
++            800, "y", y - fn4 (x) + 15, "ar", 0, "f", v3, 0);
++}
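The pr39457 test above exercises the new guard against spilling floating
point values to LR/CTR.  The tri-state option behind it can be summarized
with a hedged C sketch (an illustrative helper only; in the patch the check
is written inline in rs6000_hard_regno_mode_ok):

/* -mdisallow-float-in-lr-ctr is Init(-1), i.e. "unset", so the
   restriction defaults on only when VSX (power7) is enabled.  */
static int
disallow_float_in_lr_ctr_p (void)
{
  if (TARGET_DISALLOW_FLOAT_IN_LR_CTR > 0)	/* explicitly enabled */
    return 1;
  if (TARGET_DISALLOW_FLOAT_IN_LR_CTR < 0 && TARGET_VSX)
    return 1;					/* default for power7 */
  return 0;					/* off otherwise */
}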
diff --git a/gcc44-power7.patch b/gcc44-power7.patch
new file mode 100644
index 0000000..42f8088
--- /dev/null
+++ b/gcc44-power7.patch
@@ -0,0 +1,9513 @@
+2009-03-09  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vsx.md (vsx_store<mode>_update64): Use correct
+	registers for store with update.
+	(vsx_store<mode>_update32): Ditto.
+	(vsx_storedf_update<VSbit>): Ditto.
+
+2009-03-06  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* doc/invoke.texi (-mvsx-scalar-memory): New switch to use VSX
+	reg+reg addressing for all scalar double precision floating
+	point.
+	* config/rs6000/rs6000.opt (-mvsx-scalar-memory): Ditto.
+
+	* configure.ac (gcc_cv_as_powerpc_mfpgpr): Set binutils version to
+	2.19.2.
+	(gcc_cv_as_powerpc_cmpb): Ditto.
+	(gcc_cv_as_powerpc_dfp): Ditto.
+	(gcc_cv_as_powerpc_vsx): Ditto.
+	(gcc_cv_as_powerpc_popcntd): Ditto.
+	* configure: Regenerate.
+
+	* config/rs6000/vector.md (VEC_int): New mode attribute for vector
+	conversions.
+	(VEC_INT): Ditto.
+	(ftrunc<mode>2): Make this a define_expand.
+	(float<VEC_int><mode>2): New vector conversion support to add VSX
+	32 bit int/32 bit floating point convert and 64 bit int/64 bit
+	floating point vector instructions.
+	(unsigned_float<VEC_int><mode>2): Ditto.
+	(fix_trunc<mode><VEC_int>2): Ditto.
+	(fixuns_trunc<mode><VEC_int>2): Ditto.
+
+	* config/rs6000/predicates.md (easy_fp_constant): 0.0 is an easy
+	constant under VSX.
+	(indexed_or_indirect_operand): Add VSX load/store with update
+	support.
+
+	* config/rs6000/rs6000.c (rs6000_debug_addr): New global for
+	-mdebug=addr.
+	(rs6000_init_hard_regno_mode_ok): Add -mvsx-scalar-memory
+	support.
+	(rs6000_override_options): Add -mdebug=addr support.
+	(rs6000_builtin_conversion): Add VSX same size conversions.
+	(rs6000_legitimize_address): Add -mdebug=addr support.  Add
+	support for VSX load/store with update instructions.
+	(rs6000_legitimize_reload_address): Ditto.
+	(rs6000_legitimate_address): Ditto.
+	(rs6000_mode_dependent_address): Ditto.
+	(print_operand): Ditto.
+	(bdesc_1arg): Add builtins for conversion that calls either the
+	VSX or Altivec insn pattern.
+	(rs6000_common_init_builtins): Ditto.
+
+	* config/rs6000/vsx.md (VSX_I): Delete, no longer used.
+	(VSi): New mode attribute for conversions.
+	(VSI): Ditto.
+	(VSc): Ditto.
+	(vsx_mov<mode>): Add load/store with update support.
+	(vsx_load<mode>_update*): New insns for load/store with update
+	support.
+	(vsx_store<mode>_update*): Ditto.
+	(vsx_fmadd<mode>4): Generate correct code for V4SF.
+	(vsx_fmsub<mode>4): Ditto.
+	(vsx_fnmadd<mode>4_*): Ditto.
+	(vsx_fnmsub<mode>4_*): Ditto.
+	(vsx_float<VSi><mode>2): New insn for vector conversion.
+	(vsx_floatuns<VSi><mode>2): Ditto.
+	(vsx_fix_trunc<mode><VSi>2): Ditto.
+	(vsx_fixuns_trunc<mode><VSi>2): Ditto.
+	(vsx_xxmrghw): New insn for V4SF interleave.
+	(vsx_xxmrglw): Ditto.
+
+	* config/rs6000/rs6000.h (rs6000_debug_addr): -mdebug=addr
+	support.
+	(TARGET_DEBUG_ADDR): Ditto.
+	(rs6000_builtins): Add VSX instructions for eventual VSX
+	builtins.
+
+	* config/rs6000/altivec.md (altivec_vmrghsf): Don't do the altivec
+	instruction if VSX.
+	(altivec_vmrglsf): Ditto.
+
+	* config/rs6000/rs6000.md (movdf_hardfloat32): Add support for
+	using xxlxor to zero a floating register if VSX.
+	(movdf_hardfloat64_mfpgpr): Ditto.
+	(movdf_hardfloat64): Ditto.
+
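One visible consequence of the easy_fp_constant and movdf changes in the
entry above: a double-precision zero no longer needs a constant-pool load
under VSX.  A minimal hedged sketch (function name illustrative):

/* With -mcpu=power7, 0.0 is now an "easy" constant, so the register
   can simply be cleared with xxlxor instead of loaded from memory.  */
double
dzero (void)
{
  return 0.0;
}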
+2009-03-03  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vsx.md (vsx_xxmrglw): Delete for now, use Altivec.
+	(vsx_xxmrghw): Ditto.
+
+	* config/rs6000/altivec.md (altivec_vmrghsf): Use this insn even
+	on VSX systems.
+	(altivec_vmrglsf): Ditto.
+
+	* config/rs6000/rs6000.h (ASM_CPU_NATIVE_SPEC): Use %(asm_default)
+	if we are running as a cross compiler.
+
+	* config/rs6000/vector.md (vec_interleave_highv4sf): Use correct
+	constants for the extraction.
+	(vec_interleave_lowv4sf): Ditto.
+
+	* config/rs6000/rs6000.md (floordf2): Fix typo, make this a
+	define_expand, not define_insn.
+
+	* config/rs6000/aix53.h (ASM_CPU_SPEC): If -mcpu=native, call
+	%:local_cpu_detect(asm) to get the appropriate assembler flags for
+	the machine.
+	* config/rs6000/aix61.h (ASM_CPU_SPEC): Ditto.
+	* config/rs6000/rs6000.h (ASM_CPU_SPEC): Ditto.
+	(ASM_CPU_NATIVE_SPEC): New spec to get asm options if
+	-mcpu=native.
+	(EXTRA_SPECS): Add ASM_CPU_NATIVE_SPEC.
+
+	* config/rs6000/driver-rs6000.c (asm_names): New static array to
+	give the appropriate asm switches if -mcpu=native.
+	(host_detect_local_cpu): Add support for "asm".
+
+	* config/rs6000/rs6000.c (processor_target_table): Don't turn on
+	-misel by default for power7.
+
+2009-03-02  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/rs6000.c (rs6000_emit_swdivdf): Revert last
+	change, since we reverted the floating multiply/add changes.
+
+	* doc/md.texi (Machine Constraints): Update rs6000 constraints.
+
+	* config/rs6000/vector.md (neg<mode>2): Fix typo to enable
+	vectorized negation.
+	(ftrunc<mode>2): Move ftrunc expander here from altivec.md, and
+	add V2DF case.
+	(vec_interleave_highv4sf): Correct type to be V4SF, not V4SI.
+	(vec_extract_evenv2df): Add expander.
+	(vec_extract_oddv2df): Ditto.
+
+	* config/rs6000/vsx.md (vsx_ftrunc<mode>2): New VSX pattern for
+	truncate.
+	(vsx_ftruncdf2): Ditto.
+	(vsx_xxspltw): New instruction for word splat.
+	(vsx_xxmrglw): Whitespace changes.  Fix typo from V4SI to V4SF.
+	(vsx_xxmrghw): Ditto.
+
+	* config/rs6000/altivec.md (altivec_vmrghsf): Whitespace changes.
+	(altivec_vmrglsf): Ditto.
+	(altivec_vspltsf): Disable if we have VSX.
+	(altivec_ftruncv4sf2): Move expander to vector.md, rename insn.
+
+	* config/rs6000/rs6000.md (ftruncdf2): Add expander for VSX.
+
+	* config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok):
+	Reenable vectorizing V4SF under altivec.
+	(rs6000_hard_regno_mode_ok): Don't allow floating values in LR,
+	CTR, MQ.  Also, VRSAVE/VSCR are both 32-bits.
+	(rs6000_init_hard_regno_mode_ok): Print some of the special
+	registers if -mdebug=reg.
+
+	* config/rs6000/rs6000.md (floating multiply/add insns): Go back
+	to the original semantics for multiply add/subtract, particularly
+	with -ffast-math.
+
+	* config/rs6000/vsx.md (floating multiply/add insns): Mirror the
+	rs6000 floating point multiply/add insns in VSX.
+
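The multiply/add patterns restored in the entry above match ordinary source
expressions; a hedged sketch of the shape combine fuses (names illustrative):

/* Candidate for the fmadd-style patterns; under -ffast-math the
   negated forms (fnmadd/fnmsub) match -(a * b) +/- c as well.  */
double
mul_add (double a, double b, double c)
{
  return a * b + c;
}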
+2009-03-01  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vector.md (VEC_L): Add TImode.
+	(VEC_M): Like VEC_L, except no TImode.
+	(VEC_base): Add TImode support.
+	(mov<mode>): Use VEC_M, not VEC_L.  If there is no extra
+	optimization for the move, just generate the standard move.
+	(vector_store_<mode>): Ditto.
+	(vector_load_<mode>): Ditto.
+	(vec_init<mode>): Use vec_init_operand predicate.
+
+	* config/rs6000/predicates.md (vec_init_operand): New predicate.
+
+	* config/rs6000/rs6000.c (rs6000_hard_regno_mode_ok): Allow mode
+	in a VSX register if there is a move operation.
+	(rs6000_vector_reg_class): Add internal register number to the
+	debug output.
+	(rs6000_init_hard_regno_mode_ok): Reorganize so all of the code
+	for a given type is located together.  If not -mvsx, make "ws"
+	constraint become NO_REGS, not FLOAT_REGS.  Change -mdebug=reg
+	output.
+	(rs6000_expand_vector_init): Before calling gen_vsx_concat_v2df,
+	make sure the two float arguments are copied into registers.
+	(rs6000_legitimate_offset_address_p): If no vsx or altivec, don't
+	disallow offset addressing.  Add V2DImode.  If TImode is handled
+	by the vector unit, allow indexed addressing.  Change default case
+	to be a fatal_insn instead of gcc_unreachable.
+	(rs6000_handle_altivec_attribute): Add support for vector double
+	if -mvsx.
+	(rs6000_register_move_cost): Add support for VSX_REGS.  Know that
+	under VSX, you can move between float and altivec registers
+	cheaply.
+	(rs6000_emit_swdivdf): Change the pattern of the negate multiply
+	and subtract operation.
+
+	* config/rs6000/vsx.md (VSX_I): Add TImode.
+	(VSX_L): Add TImode.
+	(VSm): Ditto.
+	(VSs): Ditto.
+	(VSr): Ditto.
+	(UNSPEC_VSX_CONCAT_V2DF): New constant.
+	(vsx_fre<mode>2): Add reciprocal estimate.
+	(vsx_freDF2): Ditto.
+	(vsx_fnmadd<mode>4): Rework pattern so it matches the
+	canonicalization that the compiler does.
+	(vsx_fnmsub<mode>4): Ditto.
+	(vsx_fnmaddDF4): Ditto.
+	(vsx_fnmsubDF4): Ditto.
+	(vsx_vsel<mode>): Use vsx_register_operand, not register_operand.
+	(vsx_adddf3): Ditto.
+	(vsx_subdf3): Ditto.
+	(vsx_muldf3): Ditto.
+	(vsx_divdf3): Ditto.
+	(vsx_negdf3): Ditto.
+	(vsx_absdf2): Ditto.
+	(vsx_nabsdf2): Ditto.
+	(vsx_copysign<mode>3): Add copysign support.
+	(vsx_copysignDF3): Ditto.
+	(vsx_concat_v2df): Rewrite to use an UNSPEC.
+	(vsx_set_v2df): Use "ws" constraint for scalar float.
+	(vsx_splatv2df): Ditto.
+
+	* config/rs6000/rs6000.h (VECTOR_UNIT_NONE_P): New macro to say no
+	vector support.
+	(VECTOR_MEM_NONE_P): Ditto.
+	(VSX_MOVE_MODE): Add V2DImode, TImode.
+
+	* config/rs6000/altivec.md (VM): Add V2DI, TI.
+	(build_vector_mask_for_load): Fix thinko in VSX case.
+
+	* config/rs6000/rs6000.md (fmaddsf4_powerpc): Name previously
+	unnamed pattern.  Fix insns so combine will generate the negative
+	multiply and subtract operations.
+	(fmaddsf4_power): Ditto.
+	(fmsubsf4_powerpc): Ditto.
+	(fmsubsf4_power): Ditto.
+	(fnmaddsf4_powerpc): Ditto.
+	(fnmaddsf4_power): Ditto.
+	(fnmsubsf4_powerpc): Ditto.
+	(fnmsubsf4_power): Ditto.
+	(fnmsubsf4_powerpc2): Ditto.
+	(fnmsubsf4_power2): Ditto.
+	(fmadddf4_powerpc): Ditto.
+	(fmadddf4_power): Ditto.
+	(fmsubdf4_powerpc): Ditto.
+	(fmsubdf4_power): Ditto.
+	(fnmadddf4_powerpc): Ditto.
+	(fnmadddf4_power): Ditto.
+	(fnmsubdf4_powerpc): Ditto.
+	(fnmsubdf4_power): Ditto.
+	(copysigndf3): If VSX, call the VSX copysign.
+	(fred): Split into an expander and insn.  On insn, disable if
+	VSX.
+	(movdf_hardfloat32): Rework VSX support.
+	(movdf_hardfloat64_mfpgpr): Ditto.
+	(movdf_hardfloat64): Ditto.
+	(movti_ppc64): If vector unit is handling TImode, disable this
+	pattern.
+
+2009-02-28  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/ppc-asm.h: If __VSX__ define the additional scalar
+	floating point registers that overlap with the Altivec registers.
+
+2009-02-27  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/spe.md (spe_fixuns_truncdfsi2): Rename from
+	fixuns_truncdfsi2, and move fixuns_truncdfsi2 into rs6000.md.
+
+	* config/rs6000/ppc-asm.h: If __ALTIVEC__ is defined, define the
+	Altivec registers.  If __VSX__ is defined, define the VSX
+	registers.
+
+	* config/rs6000/rs6000.opt (-mvsx-scalar-double): Make this on by
+	default.
+	(-misel): Make this a target mask instead of a variable.
+
+	* config/rs6000/rs6000.c (rs6000_isel): Delete global variable.
+	(rs6000_explicit_options): Delete isel field.
+	(POWERPC_MASKS): Add MASK_ISEL.
+	(processor_target_table): Add MASK_ISEL to 8540, 8548, e500mc, and
+	power7 processors.
+	(rs6000_override_options): Move -misel to a target mask.
+	(rs6000_handle_option): Ditto.
+	(rs6000_emit_int_cmove): Add support for 64-bit isel.
+
+	* config/rs6000/vsx.md (vsx_floatdidf2): New scalar floating point
+	pattern to support VSX conversion and rounding instructions.
+	(vsx_floatunsdidf2): Ditto.
+	(vsx_fix_truncdfdi2): Ditto.
+	(vsx_fixuns_truncdfdi2): Ditto.
+	(vsx_btruncdf2): Ditto.
+	(vsx_floordf2): Ditto.
+	(vsx_ceildf2): Ditto.
+
+	* config/rs6000/rs6000.h (rs6000_isel): Delete global.
+	(TARGET_ISEL): Delete, since -misel is now a target mask.
+	(TARGET_ISEL64): New target option for -misel on 64-bit systems.
+
+	* config/rs6000/altivec.md (altivec_gtu<mode>): Use gtu, not geu.
+
+	* config/rs6000/rs6000.md (sel): New mode attribute for 64-bit
+	ISEL support.
+	(mov<mode>cc): Add 64-bit ISEL support.
+	(isel_signed_<mode>): Ditto.
+	(isel_unsigned_<mode>): Ditto.
+	(fixuns_truncdfsi2): Move expander here from spe.md.
+	(fixuns_truncdfdi2): New expander for unsigned floating point
+	conversion on power7.
+	(btruncdf2): Split into expander and insn.  On the insn, disallow
+	on VSX, so the VSX instruction will be generated.
+	(ceildf2): Ditto.
+	(floordf2): Ditto.
+	(floatdidf2): Ditto.
+	(fix_truncdfdi2): Ditto.
+	(smindi3): Define if we have -misel on 64-bit systems.
+	(smaxdi3): Ditto.
+	(umindi3): Ditto.
+	(umaxdi3): Ditto.
+
+	* config/rs6000/e500.h (CHECK_E500_OPTIONS): Disable -mvsx on
+	E500.
+
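[Illustration, not part of the patch: a minimal C sketch of the 64-bit
ISEL support described in the entry above; whether isel is actually
emitted depends on -misel and the selected CPU.]

/* With -misel on a 64-bit target, the conditional move below can be
   compiled to a branch-free isel instruction instead of a compare
   and branch sequence.  */
long
select_max (long a, long b)
{
  return (a > b) ? a : b;
}
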
+2009-02-26  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/constraints.md ("wd" constraint):  Change the
+	variable that holds the register class to use.
+	("wf" constraint): Ditto.
+	("ws" constraint): Ditto.
+	("wa" constraint): Ditto.
+
+	* config/rs6000/rs6000.opt (-mvsx-vector-memory): Make this on by
+	default.
+	(-mvsx-vector-float): Ditto.
+
+	* config/rs6000/rs6000.c (rs6000_vector_reg_class): New global to
+	hold the register classes for the vector modes.
+	(rs6000_vsx_v4sf_regclass): Delete, move into
+	rs6000_vector_reg_class.
+	(rs6000_vsx_v2df_regclass): Ditto.
+	(rs6000_vsx_df_regclass): Ditto.
+	(rs6000_vsx_reg_class): Rename from rs6000_vsx_any_regclass.
+	(rs6000_hard_regno_mode_ok): Rework VSX, Altivec registers.
+	(rs6000_init_hard_regno_mode_ok): Setup rs6000_vector_reg_class.
+	Drop rs6000_vsx_*_regclass.  By default, use all 64 registers for
+	V4SF and V2DF.  Use VSX_REG_CLASS_P macro instead of separate
+	tests.  Update -mdebug=reg printout.
+	(rs6000_preferred_reload_class): If VSX, prefer FLOAT_REGS for
+	scalar floating point and ALTIVEC_REGS for the types that have
+	altivec instructions.
+	(rs6000_secondary_memory_needed): If VSX, we can copy between FPR
+	and Altivec registers without needing memory.
+	(rs6000_secondary_reload_class): Delete ATTRIBUTE_UNUSED from an
+	argument that is used.  If VSX, we can copy between FPR and
+	Altivec registers directly.
+
+	* config/rs6000/rs6000.h (VSX_MOVE_MODE): Add in the Altivec
+	types.
+	(rs6000_vsx_v4sf_regclass): Delete.
+	(rs6000_vsx_v2df_regclass): Ditto.
+	(rs6000_vsx_df_regclass): Ditto.
+	(rs6000_vsx_reg_class): Rename from rs6000_vsx_any_regclass.
+	(rs6000_vector_reg_class): New global to map machine mode to the
+	preferred register class to use for that mode.
+	(VSX_REG_CLASS_P): New macro to return true for all of the
+	register classes VSX items can be in.
+
+2009-02-25  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* doc/invoke.texi (-mvsx-vector-memory): Rename from
+	-mvsx-vector-move.
+	(-mvsx-vector-logical): Delete.
+
+	* config/rs6000/aix53.h (ASM_CPU_SPEC): Add power7 support.
+	* config/rs6000/aix61.h (ASM_CPU_SPEC): Ditto.
+
+	* config/rs6000/vector.md (all insns): Change from using
+	rs6000_vector_info to VECTOR_MEM_* or VECTOR_UNIT_* macros.
+
+	* config/rs6000/constraints.md ("wi" constraint): Delete.
+	("wl" constraint): Ditto.
+	("ws" constraint): Change to use rs6000_vsx_df_regclass.
+
+	* config/rs6000/rs6000.opt (-mvsx-vector-memory): Rename from
+	-mvsx-vector-move.
+	(-mvsx-vector-float): Make default 0, not 1.
+	(-mvsx-vector-double): Make default -1, not 1.
+	(-mvsx-vector-logical): Delete.
+
+	* config/rs6000/rs6000.c (rs6000_vector_info): Delete.
+	(rs6000_vector_unit): New global array to say what vector unit is
+	used for arithmetic instructions.
+	(rs6000_vector_move): New global array to say what vector unit is
+	used for memory references.
+	(rs6000_vsx_int_regclass): Delete.
+	(rs6000_vsx_logical_regclass): Delete.
+	(rs6000_hard_regno_nregs_internal): Switch from using
+	rs6000_vector_info to rs6000_vector_unit, rs6000_vector_move.
+	(rs6000_hard_regno_mode_ok): Ditto.  Reformat code somewhat.
+	(rs6000_debug_vector_unit): New array to print vector unit
+	information if -mdebug=reg.
+	(rs6000_init_hard_regno_mode_ok): Rework to better describe VSX
+	and Altivec register sets.
+	(builtin_mask_for_load): Return 0 if -mvsx.
+	(rs6000_legitimize_reload_address): Allow AND in VSX addresses.
+	(rs6000_legitimate_address): Ditto.
+	(bdesc_3arg): Delete vselv2di builtin.
+	(rs6000_emit_minmax): Use rs6000_vector_unit instead of
+	rs6000_vector_info.
+	(rs6000_vector_mode_supported_p): Ditto.
+
+	* config/rs6000/vsx.md (all insns): Change from using
+	rs6000_vector_info to VECTOR_MEM_* and VECTOR_UNIT_* macros.
+	(VSr): Change to use "v" register class, not "wi".
+	(vsx_mov<mode>): Combine floating and integer.  Allow preferred
+	register class, and then use ?wa for all VSX registers.
+	(vsx_fmadddf4): Use ws constraint, not f.
+	(vsx_fmsubdf4): Ditto.
+	(vsx_fnmadddf4): Ditto.
+	(vsx_fnmsubdf4): Ditto.
+	(vsx_and<mode>3): Use preferred register class, and then ?wa to
+	catch all VSX registers.
+	(vsx_ior<mode>3): Ditto.
+	(vsx_xor<mode>3): Ditto.
+	(vsx_one_cmpl<mode>2): Ditto.
+	(vsx_nor<mode>3): Ditto.
+	(vsx_andc<mode>3): Ditto.
+
+	* config/rs6000/rs6000.h (rs6000_vector_struct): Delete.
+	(rs6000_vector_info): Ditto.
+	(rs6000_vector_unit): New global array to say whether a machine
+	mode arithmetic is handled by a particular vector unit.
+	(rs6000_vector_mem): New global array to say which vector unit to
+	use for moves.
+	(VECTOR_UNIT_*): New macros to say which vector unit to use.
+	(VECTOR_MEM_*): Ditto.
+	(rs6000_vsx_int_regclass): Delete.
+	(rs6000_vsx_logical_regclass): Delete.
+
+	* config/rs6000/altivec.md (all insns): Change from using
+	rs6000_vector_info to VECTOR_MEM_* and VECTOR_UNIT_* macros.
+	(build_vector_mask_for_load): Disable if VSX.
+
+	* config/rs6000/rs6000.md (all DF insns): Change how the VSX
+	exclusion is done.
+
+2009-02-24  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/rs6000.c (rs6000_debug_reg): New global.
+	(rs6000_debug_reg_print): New function to print register classes
+	for a given register range.
+	(rs6000_init_hard_regno_mode_ok): If -mdebug=reg, print out the
+	register class, call used, fixed information for most of the
+	registers.  Print the vsx register class variables.
+	(rs6000_override_options): Add -mdebug=reg support.
+
+	* config/rs6000/rs6000.h (rs6000_debug_reg): New global.
+	(TARGET_DEBUG_REG): New target switch for -mdebug=reg.
+
+2009-02-23  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* reload.c (subst_reloads): Change gcc_assert into a fatal_insn.
+
+	* config/rs6000/vector.md (VEC_I): Reorder iterator.
+	(VEC_L): Ditto.
+	(VEC_C): New iterator field for vector comparisons.
+	(VEC_base): New mode attribute that maps the vector type to the
+	base type.
+	(all insns): Switch to use rs6000_vector_info to determine whether
+	the insn is valid instead of using TARGET_VSX or TARGET_ALTIVEC.
+	(vcond<mode>): Move here from altivec, and add VSX support.
+	(vcondu<mode>): Ditto.
+	(vector_eq<mode>): New expander for vector comparisons.
+	(vector_gt<mode>): Ditto.
+	(vector_ge<mode>): Ditto.
+	(vector_gtu<mode>): Ditto.
+	(vector_geu<mode>): Ditto.
+	(vector_vsel<mode>): New expander for vector select.
+	(vec_init<mode>): Move expander from altivec.md and generalize for
+	VSX.
+	(vec_set<mode>): Ditto.
+	(vec_extract<mode>): Ditto.
+	(vec_interleave_highv4sf): Ditto.
+	(vec_interleave_lowv4sf): Ditto.
+	(vec_interleave_highv2df): New expander for VSX.
+	(vec_interleave_lowv2df): Ditto.
+
+	* config/rs6000/constraints.md (toplevel): Add comment on the
+	available constraint letters.
+	("w" constraint): Delete, in favor of using "w" as a two letter
+	constraint.
+	("wd" constraint): New VSX constraint.
+	("wf" constraint): Ditto.
+	("wi" constraint): Ditto.
+	("wl" constraint): Ditto.
+	("ws" constraint): Ditto.
+	("wa" constraint): Ditto.
+
+	* config/rs6000/predicates.md (indexed_or_indirect_operand):
+	Disable altivec support allowing AND of memory address if -mvsx.
+
+	* config/rs6000/rs6000.opt (-mvsx-vector-move): New switches to
+	allow finer control over whether VSX, Altivec, or the traditional
+	instructions are used.
+	(-mvsx-scalar-move): Ditto.
+	(-mvsx-vector-float): Ditto.
+	(-mvsx-vector-double): Ditto.
+	(-mvsx-vector-logical): Ditto.
+	(-mvsx-scalar-double): Ditto.
+
+	* config/rs6000/rs6000.c (rs6000_vector_info): New global to hold
+	various information about which vector instruction set to use, and
+	the alignment of data.
+	(rs6000_vsx_v4sf_regclass): New global to hold VSX register
+	class.
+	(rs6000_vsx_v2df_regclass): Ditto.
+	(rs6000_vsx_df_regclass): Ditto.
+	(rs6000_vsx_int_regclass): Ditto.
+	(rs6000_vsx_logical_regclass): Ditto.
+	(rs6000_vsx_any_regclass): Ditto.
+	(rs6000_hard_regno_nregs_internal): Rewrite to fine tune
+	VSX/Altivec register selection.
+	(rs6000_hard_regno_mode_ok): Ditto.
+	(rs6000_init_hard_regno_mode_ok): Set up the vector information
+	globals based on the -mvsx-* switches.
+	(rs6000_override_options): Add warnings for -mvsx and
+	-mlittle-endian or -mavoid-indexed-addresses.
+	(rs6000_builtin_vec_perm): Add V2DF/V2DI support.
+	(rs6000_expand_vector_init): Add V2DF support.
+	(rs6000_expand_vector_set): Ditto.
+	(rs6000_expand_vector_extract): Ditto.
+	(avoiding_indexed_address_p): Add VSX support.
+	(rs6000_legitimize_address): Ditto.
+	(rs6000_legitimize_reload_address): Ditto.
+	(rs6000_legitimate_address): Ditto.
+	(USE_ALTIVEC_FOR_ARG_P): Ditto.
+	(function_arg_boundary): Ditto.
+	(function_arg_advance): Ditto.
+	(function_arg): Ditto.
+	(get_vec_cmp_insn): Delete.
+	(rs6000_emit_vector_vsx): New function for VSX vector compare.
+	(rs6000_emit_vector_altivec): New function for Altivec vector
+	compare.
+	(get_vsel_insn): Delete.
+	(rs6000_emit_vector_select): Ditto.
+	(rs6000_override_options): If -mvsx, turn on -maltivec by
+	default.
+	(rs6000_builtin_vec_perm): Add support for V2DI, V2DF modes.
+	(bdesc_3arg): Add vector select and vector permute builtins for
+	V2DI and V2DF types.  Switch to using the vector_* expander
+	instead of altivec_*.
+	(rs6000_init_builtins): Initialize new type nodes for VSX.
+	Initialize __vector double type.  Initialize common builtins for
+	VSX.
+	(rs6000_emit_vector_compare): Add VSX support.
+	(rs6000_vector_mode_supported_p): If VSX, support V2DF.
+
+	* config/rs6000/vsx.md (VSX_I): New iterator for integer modes.
+	(VSX_L): Reorder iterator.
+	(lx_<mode>_vsx): Delete, no longer needed.
+	(stx_<mode>_vsx): Ditto.
+	(all insns): Change to use vsx_<name> instead of <name>_vsx for
+	consistency with the other rs6000 md files.  Change to use the new
+	"w" constraints for all insns.  Change to use rs6000_vector_info
+	to decide whether the insn is enabled or not.
+	(vsx_mov<mode>): Rewrite constraints so GPR registers are not
+	chosen as reload targets.  Split integer vector loads into a
+	separate insn, and favor the altivec register over the VSX fp
+	registers.
+	(vsx_fmadd<mode>4): Use <mode>, not <type>.
+	(vsx_fmsub<mode>4): Ditto.
+	(vsx_eq<mode>): New insns for V2DF/V4SF vector compare.
+	(vsx_gt<mode>): Ditto.
+	(vsx_ge<mode>): Ditto.
+	(vsx_vsel<mode>): New insns for VSX vector select.
+	(vsx_xxpermdi): New insn for DF permute.
+	(vsx_splatv2df): New insn for DF splat support.
+	(vsx_xxmrglw): New insns for DF interleave.
+	(vsx_xxmrghw): Ditto.
+
+	* config/rs6000/rs6000.h (enum rs6000_vector): New enum to
+	describe which vector unit is being used.
+	(struct rs6000_vector_struct): New struct to describe the various
+	aspects about the current vector instruction set.
+	(rs6000_vector_info): New global to describe the current vector
+	instruction set.
+	(SLOW_UNALIGNED_ACCESS): If rs6000_vector_info has alignment
+	information for a type, use that.
+	(VSX_VECTOR_MOVE_MODE): New macro for all VSX vectors that are
+	supported by move instructions.
+	(VSX_MOVE_MODE): New macro for all VSX moves.
+	(enum rs6000_builtins): Add V2DI/V2DF vector select and permute
+	builtins.
+	(rs6000_builtin_type_index): Add new types for VSX vectors.
+	(rs6000_vsx_v4sf_regclass): New global to hold VSX register
+	class.
+	(rs6000_vsx_v2df_regclass): Ditto.
+	(rs6000_vsx_df_regclass): Ditto.
+	(rs6000_vsx_int_regclass): Ditto.
+	(rs6000_vsx_logical_regclass): Ditto.
+	(rs6000_vsx_any_regclass): Ditto.
+
+	* config/rs6000/altivec.md (UNSPEC_VCMP*): Delete unspec
+	constants no longer needed.
+	(UNSPEC_VSEL*): Ditto.
+	(altivec_lvx_<mode>): Delete, no longer needed.
+	(altivec_stvx_<mode>): Ditto.
+	(all insns): Rewrite to use altivec_<insn> naming consistently.
+	Switch to use rs6000_vector_info to determine whether to issue
+	the altivec form of the instructions.
+	(mov<mode>_altivec): Rewrite constraints so GPR registers are not
+	chosen as reload targets.
+	(altivec_eq<mode>): Rewrite vector conditionals, permute, select
+	to use iterators, and work with VSX.
+	(altivec_gt<mode>): Ditto.
+	(altivec_ge<mode>): Ditto.
+	(altivec_gtu<mode>): Ditto.
+	(altivec_geu<mode>): Ditto.
+	(altivec_vsel<mode>): Ditto.
+	(altivec_vperm_<mode>): Ditto.
+	(altivec_vcmp*): Rewrite to not use unspecs any more, use mode
+	iterators, and add VSX support.
+	(vcondv4si): Move to vector.md.
+	(vconduv4si): Ditto.
+	(vcondv8hi): Ditto.
+	(vconduv8hi): Ditto.
+	(vcondv16qi): Ditto.
+	(vconduv16qi): Ditto.
+
+	* config/rs6000/rs6000.md (negdf2_fpr): Add support for
+	-mvsx-scalar-double.
+	(absdf2_fpr): Ditto.
+	(nabsdf2_fpr): Ditto.
+	(adddf3_fpr): Ditto.
+	(subdf3_fpr): Ditto.
+	(muldf3_fpr): Ditto.
+	(divdf3_fpr): Ditto.
+	(DF multiply/add patterns): Ditto.
+	(sqrtdf2): Ditto.
+	(movdf_hardfloat32): Add VSX support.
+	(movdf_hardfloat64_mfpgpr): Ditto.
+	(movdf_hardfloat64): Ditto.
+
+	* doc/invoke.texi (-mvsx-*): Add new vsx switches.
+
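[Illustration, not part of the patch: a C sketch of the kind of loop
the new vector compare/select expanders let the vectorizer handle;
names and flags are assumptions.]

/* With -O2 -ftree-vectorize -mcpu=power7 -mvsx, the conditional below
   can be vectorized through the vector_gt/vector_vsel expanders.  */
#define SIZE 1024
double a[SIZE], b[SIZE], c[SIZE];

void
select_larger (void)
{
  int i;

  for (i = 0; i < SIZE; i++)
    a[i] = (b[i] > c[i]) ? b[i] : -c[i];
}
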
+2009-02-13  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config.in: Update two comments.
+
+	* config/rs6000/vector.md (VEC_L): Add V2DI type.
+	(move<mode>): Use VEC_L to get all vector types, and delete the
+	separate integer mode move definitions.
+	(vector_load_<mode>): Ditto.
+	(vector_store_<mode>): Ditto.
+	(vector move splitters): Move GPR register splitters here from
+	altivec.md.
+
+	* config/rs6000/constraints.md ("j"): Add "j" constraint to match
+	the mode's 0 value.
+
+	* config/rs6000/rs6000.c (rs6000_hard_regno_nregs_internal): Only
+	count the FPRs as being 128 bits if the mode is a VSX type.
+	(rs6000_hard_regno_mode_ok): Ditto.
+	(rs6000_emit_minmax): Use new VSX_MODE instead of separate tests.
+
+	* config/rs6000/vsx.md (VSX_L): Add V2DImode.
+	(VSm): Rename from VSX_mem, add modes for integer vectors.  Change
+	all uses.
+	(VSs): Rename from VSX_op, add modes for integer vectors.  Change
+	all uses.
+	(VSr): New mode attribute to give the register class.
+	(mov<mode>_vsx): Use VSr to get the register preferences.  Add
+	explicit 0 option.
+	(scalar double precision patterns): Do not use v register
+	constraint right now.
+	(logical patterns): Use VSr mode attribute for register
+	preferences.
+
+	* config/rs6000/rs6000.h (VSX_SCALAR_MODE): New macro.
+	(VSX_MODE): Ditto.
+
+	* config/rs6000/altivec.md (VM): New mode iterator for memory
+	operations.  Add V2DI mode.
+	(mov_altivec_<mode>): Disable if -mvsx for all modes, not just
+	V4SFmode.
+	(gpr move splitters): Move to vector.md.
+	(and<mode>3_altivec): Use VM mode iterator, not V.
+	(ior<mode>3_altivec): Ditto.
+	(xor<mode>3_altivec): Ditto.
+	(one_cmpl<mode>2_altivec): Ditto.
+	(nor<mode>3_altivec): Ditto.
+	(andc<mode>3_altivec): Ditto.
+
+	* config/rs6000/rs6000.md (movdf_hardfloat): Back out vsx changes.
+	(movdf_hardfloat64_vsx): Delete.
+
+2009-02-12  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* config/rs6000/vector.md: New file to abstract out the expanders
+	for vector operations from altivec.md.
+
+	* config/rs6000/predicates.md (vsx_register_operand): New
+	predicate to match VSX registers.
+	(vfloat_operand): New predicate to match registers used for vector
+	floating point operations.
+	(vint_operand): New predicate to match registers used for vector
+	integer operations.
+	(vlogical_operand): New predicate to match registers used for
+	vector logical operations.
+
+	* config/rs6000/rs6000-protos.h (rs6000_hard_regno_nregs): Change
+	from a function to an array.
+	(rs6000_class_max_nregs): Add declaration.
+
+	* config/rs6000/t-rs6000 (MD_INCLUDES): Define to include all of
+	the .md files included by rs6000.md.
+
+	* config/rs6000/rs6000.c (rs6000_class_max_nregs): New global
+	array to pre-calculate CLASS_MAX_NREGS.
+	(rs6000_hard_regno_nregs): Change from a function to an array to
+	pre-calculate HARD_REGNO_NREGS.
+	(rs6000_hard_regno_nregs_internal): Rename from
+	rs6000_hard_regno_nregs and add VSX support.
+	(rs6000_hard_regno_mode_ok): Add VSX support, and switch to use
+	lookup table rs6000_hard_regno_nregs.
+	(rs6000_init_hard_regno_mode_ok): Add initialization of
+	rs6000_hard_regno_nregs, and rs6000_class_max_nregs global
+	arrays.
+	(rs6000_override_options): Add some warnings for things that are
+	incompatible with -mvsx.
+	(rs6000_legitimate_offset_address_p): Add V2DFmode.
+	(rs6000_conditional_register_usage): Enable altivec registers if
+	-mvsx.
+	(bdesc_2arg): Change the name of the nor pattern.
+	(altivec_expand_ld_builtin): Change the names of the load
+	patterns to be the generic vector loads.
+	(altivec_expand_st_builtin): Change the names of the store
+	patterns to be the generic vector stores.
+	(print_operand): Add 'x' to print out a VSX register properly.
+	(rs6000_emit_minmax): Directly emit the min/max patterns for VSX
+	and Altivec.
+
+	* config/rs6000/vsx.md: New file to add all of the VSX specific
+	instructions.  Add support for load, store, move, add, subtract,
+	multiply, multiply/add, divide, negate, absolute value, maximum,
+	minimum, sqrt, and, or, xor, and-complement (andc), one's
+	complement, and nor instructions.
+
+	* config/rs6000/rs6000.h (UNITS_PER_VSX_WORD): Define.
+	(VSX_REGNO_P): New macro for VSX registers.
+	(VFLOAT_REGNO): New macro for vector floating point registers.
+	(VINT_REGNO): New macro for vector integer registers.
+	(VLOGICAL_REGNO): New macro for vector logical registers.
+	(VSX_VECTOR_MODE): New macro for vector modes supported by VSX.
+	(HARD_REGNO_NREGS): Switch to using pre-computed table.
+	(CLASS_MAX_NREGS): Ditto.
+
+	* config/rs6000/altivec.md (altivec_lvx_<mode>): Delete, replaced
+	by expanders in vector.md.
+	(altivec_stvx_<mode>): Ditto.
+	(mov<mode>): Ditto.
+	(mov_altivec_<mode>): Rename from mov<mode>_internal, and prefer
+	using VSX if available.
+	(addv4sf3_altivec): Rename from standard name, and prefer using
+	VSX if available.
+	(subv4sf3_altivec): Ditto.
+	(mulv4sf3_altivec): Ditto.
+	(smaxv4sf3_altivec): Ditto.
+	(sminv4sf3_altivec): Ditto.
+	(and<mode>3_altivec): Ditto.
+	(ior<mode>3_altivec): Ditto.
+	(xor<mode>3_altivec): Ditto.
+	(one_cmpl<mode>2): Ditto.
+	(nor<mode>3_altivec): Ditto.
+	(andc<mode>3_altivec): Ditto.
+	(absv4sf2_altivec): Ditto.
+	(vcondv4sf): Move to vector.md.
+
+	* config/rs6000/rs6000.md (negdf2_fpr): Add !TARGET_VSX to prefer
+	the version in vsx.md if -mvsx is available.
+	(absdf2_fpr): Ditto.
+	(nabsdf2_fpr): Ditto.
+	(adddf3_fpr): Ditto.
+	(subdf3_fpr): Ditto.
+	(muldf3_fpr): Ditto.
+	(multiply/add patterns): Ditto.
+	(movdf_hardfloat64): Disable if -mvsx.
+	(movdf_hardfloat64_vsx): Clone from movdf_hardfloat64 and add VSX
+	support.
+	(vector.md): Include new .md file.
+	(vsx.md): Ditto.
+
+2009-02-11  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* doc/invoke.texi (-mvsx, -mno-vsx): Document new switches.
+
+	* config/rs6000/linux64.opt (-mprofile-kernel): Move to being a
+	variable to reduce the number of target flag bits.
+	* config/rs6000/sysv4.opt (-mbit-align): Ditto.
+	(-mbit-word): Ditto.
+	(-mregnames): Ditto.
+	* config/rs6000/rs6000.opt (-mupdate, -mno-update): Ditto.
+	(-mvsx): New switch, enable VSX support.
+
+	* config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): Define
+	__VSX__ if the vector/scalar instruction set is available.
+
+	* config/rs6000/linux64.h (SUBSUBTARGET_OVERRIDE_OPTIONS): Change
+	to allow -mprofile-kernel to be a variable.
+
+	* config/rs6000/rs6000.c (processor_target_table): Set -mvsx for
+	power7 cpus.
+	(POWERPC_MASKS): Add -mvsx.
+
+	* config/rs6000/rs6000.h (ADDITIONAL_REGISTER_NAMES): Add VSX
+	register names for the registers that overlap with the floating
+	point and altivec registers.
+
+	* config/rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS):
+	TARGET_NO_BITFIELD_WORD is now a variable, not a target mask.
+
+2009-02-11  Pat Haugen  <pthaugen at us.ibm.com>
+	    Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* doc/invoke.texi (-mpopcntd, -mno-popcntd): Document new
+	switches.
+
+	* configure.ac (powerpc*-*-*): Test for the assembler having the
+	popcntd instruction.
+	* configure: Regenerate.
+	* config.in (HAVE_AS_POPCNTD): Add default value for configure
+	test.
+
+	* config/rs6000/power7.md: New file.
+
+	* config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): Define
+	_ARCH_PWR7 if the popcntd instruction is available.
+
+	* config/rs6000/rs6000.opt (-mpopcntd): New switch to
+	enable/disable the use of the popcntd and popcntw instructions.
+	(-mfused-madd, -mno-fused-madd): Move to being a separate variable
+	because we are out of mask bits.
+
+	* config/rs6000/rs6000.c (power7_cost): Define.
+	(rs6000_override_options): Add Power7 support.
+	(rs6000_issue_rate): Ditto.
+	(insn_must_be_first_in_group): Ditto.
+	(insn_must_be_last_in_group): Ditto.
+	(rs6000_emit_popcount): Add support for using the popcntw and
+	popcntd instructions.
+
+	* config/rs6000/rs6000.h (ASM_CPU_POWER7_SPEC): Switch to using
+	popcntd as the test for a power7 assembler instead of vector
+	scalar instructions.
+	(TARGET_POPCNTD): If the assembler does not support the popcntd
+	instruction, disable using it.
+	(processor_type): Add Power7 entry.
+
+	* config/rs6000/rs6000.md (define_attr "cpu"):  Add power7.
+	(power7.md): Include it.
+	(andi./andis./nor. patterns): Change insn type to fast_compare.
+	(popcntwsi2): Add popcntw support.
+	(popcntddi2): Add popcntd support.
+
+testsuite/
+2009-03-01  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* gcc.target/powerpc/vsx-vector-1.c: Fix typos.
+	* gcc.target/powerpc/vsx-vector-2.c: Ditto.
+
+	* gcc.target/powerpc/vsx-vector-3.c: New file, test __vector
+	double.
+
+	* gcc.target/powerpc/vsx-vector-4.c: New file, test __vector float
+	uses VSX instructions if -mvsx.
+
+	* gcc.dg/vmx/vmx.exp (DEFAULT_VMXCFLAGS): Add -mno-vsx.
+
+	* lib/target-supports.exp (check_vsx_hw_available): New function
+	to test for VSX.
+	(check_vmx_hw_available): Add -mno-vsx to options.
+	(check_effective_target_powerpc_vsx_ok): New function to check if
+	the powerpc compiler can support VSX.
+
+2009-02-27  Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* gcc.target/powerpc/vsx-vector-1.c: New file to test VSX code
+	generation.
+	* gcc.target/powerpc/vsx-vector-2.c: Ditto.
+
+2009-02-01 Michael Meissner  <meissner at linux.vnet.ibm.com>
+
+	* gcc.target/powerpc/popcount-2.c: New file for power7 support.
+	* gcc.target/powerpc/popcount-3.c: Ditto.
+
+--- gcc/doc/invoke.texi	(.../trunk)	(revision 144557)
++++ gcc/doc/invoke.texi	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -712,7 +712,8 @@ See RS/6000 and PowerPC Options.
+ -maltivec  -mno-altivec @gol
+ -mpowerpc-gpopt  -mno-powerpc-gpopt @gol
+ -mpowerpc-gfxopt  -mno-powerpc-gfxopt @gol
+--mmfcrf  -mno-mfcrf  -mpopcntb  -mno-popcntb  -mfprnd  -mno-fprnd @gol
++-mmfcrf  -mno-mfcrf  -mpopcntb  -mno-popcntb -mpopcntd -mno-popcntd @gol
++-mfprnd  -mno-fprnd @gol
+ -mcmpb -mno-cmpb -mmfpgpr -mno-mfpgpr -mhard-dfp -mno-hard-dfp @gol
+ -mnew-mnemonics  -mold-mnemonics @gol
+ -mfull-toc   -mminimal-toc  -mno-fp-in-toc  -mno-sum-in-toc @gol
+@@ -726,7 +727,9 @@ See RS/6000 and PowerPC Options.
+ -mstrict-align  -mno-strict-align  -mrelocatable @gol
+ -mno-relocatable  -mrelocatable-lib  -mno-relocatable-lib @gol
+ -mtoc  -mno-toc  -mlittle  -mlittle-endian  -mbig  -mbig-endian @gol
+--mdynamic-no-pic  -maltivec  -mswdiv @gol
++-mdynamic-no-pic  -maltivec -mswdiv -mvsx -mvsx-vector-memory @gol
++-mvsx-vector-float -mvsx-vector-double @gol
++-mvsx-scalar-double -mvsx-scalar-memory @gol
+ -mprioritize-restricted-insns=@var{priority} @gol
+ -msched-costly-dep=@var{dependence_type} @gol
+ -minsert-sched-nops=@var{scheme} @gol
+@@ -13512,6 +13515,8 @@ These @samp{-m} options are defined for 
+ @itemx -mno-mfcrf
+ @itemx -mpopcntb
+ @itemx -mno-popcntb
++@itemx -mpopcntd
++@itemx -mno-popcntd
+ @itemx -mfprnd
+ @itemx -mno-fprnd
+ @itemx -mcmpb
+@@ -13536,6 +13541,8 @@ These @samp{-m} options are defined for 
+ @opindex mno-mfcrf
+ @opindex mpopcntb
+ @opindex mno-popcntb
++@opindex mpopcntd
++@opindex mno-popcntd
+ @opindex mfprnd
+ @opindex mno-fprnd
+ @opindex mcmpb
+@@ -13585,6 +13592,9 @@ The @option{-mpopcntb} option allows GCC
+ double precision FP reciprocal estimate instruction implemented on the
+ POWER5 processor and other processors that support the PowerPC V2.02
+ architecture.
++The @option{-mpopcntd} option allows GCC to generate the popcount
++instruction implemented on the POWER7 processor and other processors
++that support the PowerPC V2.06 architecture.
+ The @option{-mfprnd} option allows GCC to generate the FP round to
+ integer instructions implemented on the POWER5+ processor and other
+ processors that support the PowerPC V2.03 architecture.
+@@ -13663,9 +13673,9 @@ The @option{-mcpu} options automatically
+ following options:
+ 
+ @gccoptlist{-maltivec  -mfprnd  -mhard-float  -mmfcrf  -mmultiple @gol
+--mnew-mnemonics  -mpopcntb  -mpower  -mpower2  -mpowerpc64 @gol
++-mnew-mnemonics  -mpopcntb -mpopcntd  -mpower  -mpower2  -mpowerpc64 @gol
+ -mpowerpc-gpopt  -mpowerpc-gfxopt  -msingle-float -mdouble-float @gol
+--msimple-fpu -mstring  -mmulhw  -mdlmzb  -mmfpgpr}
++-msimple-fpu -mstring  -mmulhw  -mdlmzb  -mmfpgpr -mvsx}
+ 
+ The particular options set for any particular CPU will vary between
+ compiler versions, depending on what setting seems to produce optimal
+@@ -13766,6 +13776,44 @@ instructions.
+ This option has been deprecated.  Use @option{-mspe} and
+ @option{-mno-spe} instead.
+ 
++@item -mvsx
++@itemx -mno-vsx
++@opindex mvsx
++@opindex mno-vsx
++Generate code that uses (does not use) vector/scalar (VSX)
++instructions, and also enable the use of built-in functions that allow
++more direct access to the VSX instruction set.
++
++@item -mvsx-vector-memory
++@itemx -mno-vsx-vector-memory
++If @option{-mvsx}, use VSX memory reference instructions for vectors
++instead of the Altivec instructions.  This option is a temporary switch
++to tune the compiler, and may not be supported in future versions.
++
++@item -mvsx-vector-float
++@itemx -mno-vsx-vector-float
++If @option{-mvsx}, use VSX arithmetic instructions for float vectors.
++This option is a temporary switch to tune the compiler, and may not be
++supported in future versions.
++
++@item -mvsx-vector-double
++@itemx -mno-vsx-vector-double
++If @option{-mvsx}, use VSX arithmetic instructions for double
++vectors.  This option is a temporary switch to tune the
++compiler, and may not be supported in future versions.
++
++@item -mvsx-scalar-double
++@itemx -mno-vsx-scalar-double
++If @option{-mvsx}, use VSX arithmetic instructions for scalar double.
++This option is a temporary switch to tune the compiler, and may not be
++supported in future versions.
++
++@item -mvsx-scalar-memory
++@itemx -mno-vsx-scalar-memory
++If @option{-mvsx}, use VSX memory reference instructions for scalar
++double.  This option is a temporary switch to tune the compiler, and
++may not be supported in future versions.
++
+ @item -mfloat-gprs=@var{yes/single/double/no}
+ @itemx -mfloat-gprs
+ @opindex mfloat-gprs
+--- gcc/doc/md.texi	(.../trunk)	(revision 144557)
++++ gcc/doc/md.texi	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -1905,7 +1905,19 @@ Address base register
+ Floating point register
+ 
+ @item v
+-Vector register
++Altivec vector register
++
++@item wd
++VSX vector register to hold vector double data
++
++@item wf
++VSX vector register to hold vector float data
++
++@item ws
++VSX vector register to hold scalar float data
++
++@item wa
++Any VSX register
+ 
+ @item h
+ @samp{MQ}, @samp{CTR}, or @samp{LINK} register
+@@ -1991,6 +2003,9 @@ AND masks that can be performed by two r
+ @item W
+ Vector constant that does not require memory
+ 
++@item j
++Vector constant that is all zeros.
++
+ @end table
+ 
+ @item Intel 386---@file{config/i386/constraints.md}
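[Illustration, not part of the patch: an inline-asm sketch using the
new "wa" constraint together with the new %x output modifier; the
choice of instruction is an assumption.]

/* Negate a scalar double in a VSX register.  "wa" accepts any VSX
   register and %x prints the full VSX register number.  Requires
   -mvsx and an assembler with VSX support.  */
double
vsx_negate (double x)
{
  double r;

  __asm__ ("xsnegdp %x0,%x1" : "=wa" (r) : "wa" (x));
  return r;
}
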
+--- gcc/reload.c	(.../trunk)	(revision 144557)
++++ gcc/reload.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -6255,8 +6255,14 @@ subst_reloads (rtx insn)
+ 	    *r->where = reloadreg;
+ 	}
+       /* If reload got no reg and isn't optional, something's wrong.  */
+-      else
+-	gcc_assert (rld[r->what].optional);
++      else if (!rld[r->what].optional)
++	{
++	  char buffer[100];
++	  sprintf (buffer,
++		   "unable to find register for reload, replacement #%d",
++		   i);
++	  fatal_insn (buffer, insn);
++	}
+     }
+ }
+ 
+--- gcc/testsuite/gcc.target/powerpc/popcount-3.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/popcount-3.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,9 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-options "-O2 -mcpu=power7 -m64" } */
++/* { dg-final { scan-assembler "popcntd" } } */
++
++long foo(int x)
++{
++  return __builtin_popcountl(x);
++}
+--- gcc/testsuite/gcc.target/powerpc/vsx-vector-1.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-vector-1.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,74 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -ftree-vectorize -mcpu=power7 -m64" } */
++/* { dg-final { scan-assembler "xvadddp" } } */
++/* { dg-final { scan-assembler "xvsubdp" } } */
++/* { dg-final { scan-assembler "xvmuldp" } } */
++/* { dg-final { scan-assembler "xvdivdp" } } */
++/* { dg-final { scan-assembler "xvmadd" } } */
++/* { dg-final { scan-assembler "xvmsub" } } */
++
++#ifndef SIZE
++#define SIZE 1024
++#endif
++
++double a[SIZE] __attribute__((__aligned__(32)));
++double b[SIZE] __attribute__((__aligned__(32)));
++double c[SIZE] __attribute__((__aligned__(32)));
++double d[SIZE] __attribute__((__aligned__(32)));
++double e[SIZE] __attribute__((__aligned__(32)));
++
++void
++vector_add (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] + c[i];
++}
++
++void
++vector_subtract (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] - c[i];
++}
++
++void
++vector_multiply (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] * c[i];
++}
++
++void
++vector_multiply_add (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = (b[i] * c[i]) + d[i];
++}
++
++void
++vector_multiply_subtract (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = (b[i] * c[i]) - d[i];
++}
++
++void
++vector_divide (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] / c[i];
++}
+--- gcc/testsuite/gcc.target/powerpc/vsx-vector-2.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-vector-2.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,74 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -ftree-vectorize -mcpu=power7 -m64" } */
++/* { dg-final { scan-assembler "xvaddsp" } } */
++/* { dg-final { scan-assembler "xvsubsp" } } */
++/* { dg-final { scan-assembler "xvmulsp" } } */
++/* { dg-final { scan-assembler "xvdivsp" } } */
++/* { dg-final { scan-assembler "xvmadd" } } */
++/* { dg-final { scan-assembler "xvmsub" } } */
++
++#ifndef SIZE
++#define SIZE 1024
++#endif
++
++float a[SIZE] __attribute__((__aligned__(32)));
++float b[SIZE] __attribute__((__aligned__(32)));
++float c[SIZE] __attribute__((__aligned__(32)));
++float d[SIZE] __attribute__((__aligned__(32)));
++float e[SIZE] __attribute__((__aligned__(32)));
++
++void
++vector_add (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] + c[i];
++}
++
++void
++vector_subtract (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] - c[i];
++}
++
++void
++vector_multiply (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] * c[i];
++}
++
++void
++vector_multiply_add (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = (b[i] * c[i]) + d[i];
++}
++
++void
++vector_multiply_subtract (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = (b[i] * c[i]) - d[i];
++}
++
++void
++vector_divide (void)
++{
++  int i;
++
++  for (i = 0; i < SIZE; i++)
++    a[i] = b[i] / c[i];
++}
+--- gcc/testsuite/gcc.target/powerpc/vsx-vector-3.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-vector-3.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,48 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -ftree-vectorize -mcpu=power7 -m64" } */
++/* { dg-final { scan-assembler "xvadddp" } } */
++/* { dg-final { scan-assembler "xvsubdp" } } */
++/* { dg-final { scan-assembler "xvmuldp" } } */
++/* { dg-final { scan-assembler "xvdivdp" } } */
++/* { dg-final { scan-assembler "xvmadd" } } */
++/* { dg-final { scan-assembler "xvmsub" } } */
++
++__vector double a, b, c, d;
++
++void
++vector_add (void)
++{
++  a = b + c;
++}
++
++void
++vector_subtract (void)
++{
++  a = b - c;
++}
++
++void
++vector_multiply (void)
++{
++  a = b * c;
++}
++
++void
++vector_multiply_add (void)
++{
++  a = (b * c) + d;
++}
++
++void
++vector_multiply_subtract (void)
++{
++  a = (b * c) - d;
++}
++
++void
++vector_divide (void)
++{
++  a = b / c;
++}
+--- gcc/testsuite/gcc.target/powerpc/popcount-2.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/popcount-2.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,9 @@
++/* { dg-do compile { target { ilp32 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-options "-O2 -mcpu=power7 -m32" } */
++/* { dg-final { scan-assembler "popcntw" } } */
++
++int foo(int x)
++{
++  return __builtin_popcount(x);
++}
+--- gcc/testsuite/gcc.target/powerpc/vsx-vector-4.c	(.../trunk)	(revision 0)
++++ gcc/testsuite/gcc.target/powerpc/vsx-vector-4.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,48 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -ftree-vectorize -mcpu=power7 -m64" } */
++/* { dg-final { scan-assembler "xvaddsp" } } */
++/* { dg-final { scan-assembler "xvsubsp" } } */
++/* { dg-final { scan-assembler "xvmulsp" } } */
++/* { dg-final { scan-assembler "xvdivsp" } } */
++/* { dg-final { scan-assembler "xvmadd" } } */
++/* { dg-final { scan-assembler "xvmsub" } } */
++
++__vector float a, b, c, d;
++
++void
++vector_add (void)
++{
++  a = b + c;
++}
++
++void
++vector_subtract (void)
++{
++  a = b - c;
++}
++
++void
++vector_multiply (void)
++{
++  a = b * c;
++}
++
++void
++vector_multiply_add (void)
++{
++  a = (b * c) + d;
++}
++
++void
++vector_multiply_subtract (void)
++{
++  a = (b * c) - d;
++}
++
++void
++vector_divide (void)
++{
++  a = b / c;
++}
+--- gcc/testsuite/gcc.dg/vmx/vmx.exp	(.../trunk)	(revision 144557)
++++ gcc/testsuite/gcc.dg/vmx/vmx.exp	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -31,7 +31,7 @@ if {![istarget powerpc*-*-*]
+ # nothing but extensions.
+ global DEFAULT_VMXCFLAGS
+ if ![info exists DEFAULT_VMXCFLAGS] then {
+-    set DEFAULT_VMXCFLAGS "-maltivec -mabi=altivec -std=gnu99"
++    set DEFAULT_VMXCFLAGS "-maltivec -mabi=altivec -std=gnu99 -mno-vsx"
+ }
+ 
+ # If the target system supports AltiVec instructions, the default action
+--- gcc/testsuite/lib/target-supports.exp	(.../trunk)	(revision 144557)
++++ gcc/testsuite/lib/target-supports.exp	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -873,6 +873,32 @@ proc check_sse2_hw_available { } {
+     }]
+ }
+ 
++# Return 1 if the target supports executing VSX instructions, 0
++# otherwise.  Cache the result.
++
++proc check_vsx_hw_available { } {
++    return [check_cached_effective_target vsx_hw_available {
++	# Some simulators are known to not support VSX instructions.
++	# For now, disable on Darwin
++	if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} {
++	    expr 0
++	} else {
++	    set options "-mvsx"
++	    check_runtime_nocache vsx_hw_available {
++		int main()
++		{
++		#ifdef __MACH__
++		  asm volatile ("xxlor vs0,vs0,vs0");
++		#else
++		  asm volatile ("xxlor 0,0,0");
++	        #endif
++		  return 0;
++		}
++	    } $options
++	}
++    }]
++}
++
+ # Return 1 if the target supports executing AltiVec instructions, 0
+ # otherwise.  Cache the result.
+ 
+@@ -883,12 +909,13 @@ proc check_vmx_hw_available { } {
+ 	    expr 0
+ 	} else {
+ 	    # Most targets don't require special flags for this test case, but
+-	    # Darwin does.
++	    # Darwin does.  Also make sure VSX is not enabled for
++	    # the altivec tests.
+ 	    if { [istarget *-*-darwin*]
+ 		 || [istarget *-*-aix*] } {
+-		set options "-maltivec"
++		set options "-maltivec -mno-vsx"
+ 	    } else {
+-		set options ""
++		set options "-mno-vsx"
+ 	    }
+ 	    check_runtime_nocache vmx_hw_available {
+ 		int main()
+@@ -1519,6 +1546,33 @@ proc check_effective_target_powerpc_alti
+     }
+ }
+ 
++# Return 1 if this is a PowerPC target supporting -mvsx
++
++proc check_effective_target_powerpc_vsx_ok { } {
++    if { ([istarget powerpc*-*-*]
++         && ![istarget powerpc-*-linux*paired*])
++	 || [istarget rs6000-*-*] } {
++	# AltiVec is not supported on AIX before 5.3.
++	if { [istarget powerpc*-*-aix4*]
++	     || [istarget powerpc*-*-aix5.1*] 
++	     || [istarget powerpc*-*-aix5.2*] } {
++	    return 0
++	}
++	return [check_no_compiler_messages powerpc_vsx_ok object {
++	    int main (void) {
++#ifdef __MACH__
++		asm volatile ("xxlor vs0,vs0,vs0");
++#else
++		asm volatile ("xxlor 0,0,0");
++#endif
++		return 0;
++	    }
++	} "-mvsx"]
++    } else {
++	return 0
++    }
++}
++
+ # Return 1 if this is a PowerPC target supporting -mcpu=cell.
+ 
+ proc check_effective_target_powerpc_ppu_ok { } {
+--- gcc/config.in	(.../trunk)	(revision 144557)
++++ gcc/config.in	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -334,12 +334,18 @@
+ #endif
+ 
+ 
+-/* Define if your assembler supports popcntb field. */
++/* Define if your assembler supports popcntb instruction. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_POPCNTB
+ #endif
+ 
+ 
++/* Define if your assembler supports popcntd instruction. */
++#ifndef USED_FOR_TARGET
++#undef HAVE_AS_POPCNTD
++#endif
++
++
+ /* Define if your assembler supports .register. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_REGISTER_PSEUDO_OP
+--- gcc/configure.ac	(.../trunk)	(revision 144557)
++++ gcc/configure.ac	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -2,7 +2,7 @@
+ # Process this file with autoconf to generate a configuration script.
+ 
+ # Copyright 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
+-# 2007, 2008 Free Software Foundation, Inc.
++# 2007, 2008, 2009 Free Software Foundation, Inc.
+ 
+ #This file is part of GCC.
+ 
+@@ -3080,7 +3080,7 @@ foo:	nop
+     esac
+ 
+     gcc_GAS_CHECK_FEATURE([move fp gpr support],
+-      gcc_cv_as_powerpc_mfpgpr, [9,99,0],,
++      gcc_cv_as_powerpc_mfpgpr, [2,19,2],,
+       [$conftest_s],,
+       [AC_DEFINE(HAVE_AS_MFPGPR, 1,
+ 	  [Define if your assembler supports mffgpr and mftgpr.])])
+@@ -3114,7 +3114,7 @@ LCF0:
+     esac
+ 
+     gcc_GAS_CHECK_FEATURE([compare bytes support],
+-      gcc_cv_as_powerpc_cmpb, [9,99,0], -a32,
++      gcc_cv_as_powerpc_cmpb, [2,19,2], -a32,
+       [$conftest_s],,
+       [AC_DEFINE(HAVE_AS_CMPB, 1,
+ 	  [Define if your assembler supports cmpb.])])
+@@ -3129,7 +3129,7 @@ LCF0:
+     esac
+ 
+     gcc_GAS_CHECK_FEATURE([decimal float support],
+-      gcc_cv_as_powerpc_dfp, [9,99,0], -a32,
++      gcc_cv_as_powerpc_dfp, [2,19,2], -a32,
+       [$conftest_s],,
+       [AC_DEFINE(HAVE_AS_DFP, 1,
+ 	  [Define if your assembler supports DFP instructions.])])
+@@ -3144,11 +3144,26 @@ LCF0:
+     esac
+ 
+     gcc_GAS_CHECK_FEATURE([vector-scalar support],
+-      gcc_cv_as_powerpc_vsx, [9,99,0], -a32,
++      gcc_cv_as_powerpc_vsx, [2,19,2], -a32,
+       [$conftest_s],,
+       [AC_DEFINE(HAVE_AS_VSX, 1,
+ 	  [Define if your assembler supports VSX instructions.])])
+ 
++    case $target in
++      *-*-aix*) conftest_s='	.machine "pwr7"
++	.csect .text[[PR]]
++	popcntd 3,3';;
++      *) conftest_s='	.machine power7
++	.text
++	popcntd 3,3';;
++    esac
++
++    gcc_GAS_CHECK_FEATURE([popcntd support],
++      gcc_cv_as_powerpc_popcntd, [2,19,2], -a32,
++      [$conftest_s],,
++      [AC_DEFINE(HAVE_AS_POPCNTD, 1,
++	  [Define if your assembler supports POPCNTD instructions.])])
++
+     gcc_GAS_CHECK_FEATURE([.gnu_attribute support],
+       gcc_cv_as_powerpc_gnu_attribute, [2,18,0],,
+       [.gnu_attribute 4,1],,
+--- gcc/configure	(.../trunk)	(revision 144557)
++++ gcc/configure	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -23225,7 +23225,7 @@ if test "${gcc_cv_as_powerpc_mfpgpr+set}
+ else
+   gcc_cv_as_powerpc_mfpgpr=no
+     if test $in_tree_gas = yes; then
+-    if test $gcc_cv_gas_vers -ge `expr \( \( 9 \* 1000 \) + 99 \) \* 1000 + 0`
++    if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 2`
+   then gcc_cv_as_powerpc_mfpgpr=yes
+ fi
+   elif test x$gcc_cv_as != x; then
+@@ -23321,7 +23321,7 @@ if test "${gcc_cv_as_powerpc_cmpb+set}" 
+ else
+   gcc_cv_as_powerpc_cmpb=no
+     if test $in_tree_gas = yes; then
+-    if test $gcc_cv_gas_vers -ge `expr \( \( 9 \* 1000 \) + 99 \) \* 1000 + 0`
++    if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 2`
+   then gcc_cv_as_powerpc_cmpb=yes
+ fi
+   elif test x$gcc_cv_as != x; then
+@@ -23367,7 +23367,7 @@ if test "${gcc_cv_as_powerpc_dfp+set}" =
+ else
+   gcc_cv_as_powerpc_dfp=no
+     if test $in_tree_gas = yes; then
+-    if test $gcc_cv_gas_vers -ge `expr \( \( 9 \* 1000 \) + 99 \) \* 1000 + 0`
++    if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 2`
+   then gcc_cv_as_powerpc_dfp=yes
+ fi
+   elif test x$gcc_cv_as != x; then
+@@ -23413,7 +23413,7 @@ if test "${gcc_cv_as_powerpc_vsx+set}" =
+ else
+   gcc_cv_as_powerpc_vsx=no
+     if test $in_tree_gas = yes; then
+-    if test $gcc_cv_gas_vers -ge `expr \( \( 9 \* 1000 \) + 99 \) \* 1000 + 0`
++    if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 2`
+   then gcc_cv_as_powerpc_vsx=yes
+ fi
+   elif test x$gcc_cv_as != x; then
+@@ -23443,6 +23443,52 @@ _ACEOF
+ 
+ fi
+ 
++    case $target in
++      *-*-aix*) conftest_s='	.machine "pwr7"
++	.csect .text[PR]
++	popcntd 3,3';;
++      *) conftest_s='	.machine power7
++	.text
++	popcntd 3,3';;
++    esac
++
++    echo "$as_me:$LINENO: checking assembler for popcntd support" >&5
++echo $ECHO_N "checking assembler for popcntd support... $ECHO_C" >&6
++if test "${gcc_cv_as_powerpc_popcntd+set}" = set; then
++  echo $ECHO_N "(cached) $ECHO_C" >&6
++else
++  gcc_cv_as_powerpc_popcntd=no
++    if test $in_tree_gas = yes; then
++    if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 2`
++  then gcc_cv_as_powerpc_popcntd=yes
++fi
++  elif test x$gcc_cv_as != x; then
++    echo "$conftest_s" > conftest.s
++    if { ac_try='$gcc_cv_as -a32 -o conftest.o conftest.s >&5'
++  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
++  (eval $ac_try) 2>&5
++  ac_status=$?
++  echo "$as_me:$LINENO: \$? = $ac_status" >&5
++  (exit $ac_status); }; }
++    then
++	gcc_cv_as_powerpc_popcntd=yes
++    else
++      echo "configure: failed program was" >&5
++      cat conftest.s >&5
++    fi
++    rm -f conftest.o conftest.s
++  fi
++fi
++echo "$as_me:$LINENO: result: $gcc_cv_as_powerpc_popcntd" >&5
++echo "${ECHO_T}$gcc_cv_as_powerpc_popcntd" >&6
++if test $gcc_cv_as_powerpc_popcntd = yes; then
++
++cat >>confdefs.h <<\_ACEOF
++#define HAVE_AS_POPCNTD 1
++_ACEOF
++
++fi
++
+     echo "$as_me:$LINENO: checking assembler for .gnu_attribute support" >&5
+ echo $ECHO_N "checking assembler for .gnu_attribute support... $ECHO_C" >&6
+ if test "${gcc_cv_as_powerpc_gnu_attribute+set}" = set; then
+--- gcc/config/rs6000/aix53.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/aix53.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -57,20 +57,24 @@ do {									\
+ #undef ASM_SPEC
+ #define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+ 
+-/* Common ASM definitions used by ASM_SPEC amongst the various targets
+-   for handling -mcpu=xxx switches.  */
++/* Common ASM definitions used by ASM_SPEC amongst the various targets for
++   handling -mcpu=xxx switches.  There is a parallel list in driver-rs6000.c to
++   provide the default assembler options if the user uses -mcpu=native, so if
++   you make changes here, make them there also.  */
+ #undef ASM_CPU_SPEC
+ #define ASM_CPU_SPEC \
+ "%{!mcpu*: %{!maix64: \
+   %{mpowerpc64: -mppc64} \
+   %{maltivec: -m970} \
+   %{!maltivec: %{!mpower64: %(asm_default)}}}} \
++%{mcpu=native: %(asm_cpu_native)} \
+ %{mcpu=power3: -m620} \
+ %{mcpu=power4: -mpwr4} \
+ %{mcpu=power5: -mpwr5} \
+ %{mcpu=power5+: -mpwr5x} \
+ %{mcpu=power6: -mpwr6} \
+ %{mcpu=power6x: -mpwr6} \
++%{mcpu=power7: -mpwr7} \
+ %{mcpu=powerpc: -mppc} \
+ %{mcpu=rs64a: -mppc} \
+ %{mcpu=603: -m603} \
+--- gcc/config/rs6000/vector.md	(.../trunk)	(revision 0)
++++ gcc/config/rs6000/vector.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,518 @@
++;; Expander definitions for vector support between altivec & vsx.  No
++;; instructions are in this file; it provides the generic vector
++;; expanders, and the actual vector instructions will be in altivec.md and
++;; vsx.md
++
++;; Copyright (C) 2009
++;; Free Software Foundation, Inc.
++;; Contributed by Michael Meissner <meissner at linux.vnet.ibm.com>
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++;; Vector int modes
++(define_mode_iterator VEC_I [V16QI V8HI V4SI])
++
++;; Vector float modes
++(define_mode_iterator VEC_F [V4SF V2DF])
++
++;; Vector logical modes
++(define_mode_iterator VEC_L [V16QI V8HI V4SI V2DI V4SF V2DF TI])
++
++;; Vector modes for moves.  Don't do TImode here.
++(define_mode_iterator VEC_M [V16QI V8HI V4SI V2DI V4SF V2DF])
++
++;; Vector comparison modes
++(define_mode_iterator VEC_C [V16QI V8HI V4SI V4SF V2DF])
++
++;; Base type from vector mode
++(define_mode_attr VEC_base [(V16QI "QI")
++			    (V8HI  "HI")
++			    (V4SI  "SI")
++			    (V2DI  "DI")
++			    (V4SF  "SF")
++			    (V2DF  "DF")
++			    (TI    "TI")])
++
++;; Same size integer type for floating point data
++(define_mode_attr VEC_int [(V4SF  "v4si")
++			   (V2DF  "v2di")])
++
++(define_mode_attr VEC_INT [(V4SF  "V4SI")
++			   (V2DF  "V2DI")])
++
++;; Vector move instructions.
++(define_expand "mov<mode>"
++  [(set (match_operand:VEC_M 0 "nonimmediate_operand" "")
++	(match_operand:VEC_M 1 "any_operand" ""))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++{
++  /* Modes without special handling just generate the normal SET operation.  */
++  if (<MODE>mode != TImode && <MODE>mode != V2DImode && <MODE>mode != V2DFmode)
++    {
++      rs6000_emit_move (operands[0], operands[1], <MODE>mode);
++      DONE;
++    }
++  else if (!vlogical_operand (operands[0], <MODE>mode)
++	   && !vlogical_operand (operands[1], <MODE>mode))
++    operands[1] = force_reg (<MODE>mode, operands[1]);
++})
++
++;; Generic vector floating point load/store instructions.  These will match
++;; insns defined in vsx.md or altivec.md depending on the switches.
++(define_expand "vector_load_<mode>"
++  [(set (match_operand:VEC_M 0 "vfloat_operand" "")
++	(match_operand:VEC_M 1 "memory_operand" ""))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "vector_store_<mode>"
++  [(set (match_operand:VEC_M 0 "memory_operand" "")
++	(match_operand:VEC_M 1 "vfloat_operand" ""))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++;; Splits if a GPR register was chosen for the move
++(define_split
++  [(set (match_operand:VEC_L 0 "nonimmediate_operand" "")
++        (match_operand:VEC_L 1 "input_operand" ""))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)
++   && reload_completed
++   && gpr_or_gpr_p (operands[0], operands[1])"
++  [(pc)]
++{
++  rs6000_split_multireg_move (operands[0], operands[1]);
++  DONE;
++})
++
++
++;; Generic floating point vector arithmetic support
++(define_expand "add<mode>3"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(plus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
++		    (match_operand:VEC_F 2 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "sub<mode>3"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(minus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
++		     (match_operand:VEC_F 2 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "mul<mode>3"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(mult:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
++		    (match_operand:VEC_F 2 "vfloat_operand" "")))]
++  "(VECTOR_UNIT_VSX_P (<MODE>mode)
++    || (VECTOR_UNIT_ALTIVEC_P (<MODE>mode) && TARGET_FUSED_MADD))"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_mulv4sf3 (operands[0], operands[1], operands[2]));
++      DONE;
++    }
++}")
++
++(define_expand "div<mode>3"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(div:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
++		   (match_operand:VEC_F 2 "vfloat_operand" "")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "neg<mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(neg:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_negv4sf2 (operands[0], operands[1]));
++      DONE;
++    }
++}")
++
++(define_expand "abs<mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(abs:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_absv4sf2 (operands[0], operands[1]));
++      DONE;
++    }
++}")
++
++(define_expand "smin<mode>3"
++  [(set (match_operand:VEC_F 0 "register_operand" "")
++        (smin:VEC_F (match_operand:VEC_F 1 "register_operand" "")
++		    (match_operand:VEC_F 2 "register_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "smax<mode>3"
++  [(set (match_operand:VEC_F 0 "register_operand" "")
++        (smax:VEC_F (match_operand:VEC_F 1 "register_operand" "")
++		    (match_operand:VEC_F 2 "register_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++
++(define_expand "sqrt<mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(sqrt:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "ftrunc<mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++  	(fix:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++
++;; Vector comparisons
++(define_expand "vcond<mode>"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(if_then_else:VEC_F
++	 (match_operator 3 "comparison_operator"
++			 [(match_operand:VEC_F 4 "vfloat_operand" "")
++			  (match_operand:VEC_F 5 "vfloat_operand" "")])
++	 (match_operand:VEC_F 1 "vfloat_operand" "")
++	 (match_operand:VEC_F 2 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
++				    operands[3], operands[4], operands[5]))
++    DONE;
++  else
++    FAIL;
++}")
++
++(define_expand "vcond<mode>"
++  [(set (match_operand:VEC_I 0 "vint_operand" "")
++	(if_then_else:VEC_I
++	 (match_operator 3 "comparison_operator"
++			 [(match_operand:VEC_I 4 "vint_operand" "")
++			  (match_operand:VEC_I 5 "vint_operand" "")])
++	 (match_operand:VEC_I 1 "vint_operand" "")
++	 (match_operand:VEC_I 2 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "
++{
++  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
++				    operands[3], operands[4], operands[5]))
++    DONE;
++  else
++    FAIL;
++}")
++
++(define_expand "vcondu<mode>"
++  [(set (match_operand:VEC_I 0 "vint_operand" "=v")
++	(if_then_else:VEC_I
++	 (match_operator 3 "comparison_operator"
++			 [(match_operand:VEC_I 4 "vint_operand" "")
++			  (match_operand:VEC_I 5 "vint_operand" "")])
++	 (match_operand:VEC_I 1 "vint_operand" "")
++	 (match_operand:VEC_I 2 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "
++{
++  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
++				    operands[3], operands[4], operands[5]))
++    DONE;
++  else
++    FAIL;
++}")
++
++(define_expand "vector_eq<mode>"
++  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
++	(eq:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
++		  (match_operand:VEC_C 2 "vlogical_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "vector_gt<mode>"
++  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
++	(gt:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
++		  (match_operand:VEC_C 2 "vlogical_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "vector_ge<mode>"
++  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
++	(ge:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
++		  (match_operand:VEC_C 2 "vlogical_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "vector_gtu<mode>"
++  [(set (match_operand:VEC_I 0 "vint_operand" "")
++	(gtu:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
++		   (match_operand:VEC_I 2 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "")
++
++(define_expand "vector_geu<mode>"
++  [(set (match_operand:VEC_I 0 "vint_operand" "")
++	(geu:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
++		   (match_operand:VEC_I 2 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "")
++
++;; Note the arguments for __builtin_altivec_vsel are op2, op1, mask,
++;; which is the reverse of the order we want
++(define_expand "vector_vsel<mode>"
++  [(match_operand:VEC_F 0 "vlogical_operand" "")
++   (match_operand:VEC_F 1 "vlogical_operand" "")
++   (match_operand:VEC_F 2 "vlogical_operand" "")
++   (match_operand:VEC_F 3 "vlogical_operand" "")]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (VECTOR_UNIT_VSX_P (<MODE>mode))
++    emit_insn (gen_vsx_vsel<mode> (operands[0], operands[3],
++				   operands[2], operands[1]));
++  else
++    emit_insn (gen_altivec_vsel<mode> (operands[0], operands[3],
++				       operands[2], operands[1]));
++  DONE;
++}")
++
++(define_expand "vector_vsel<mode>"
++  [(match_operand:VEC_I 0 "vlogical_operand" "")
++   (match_operand:VEC_I 1 "vlogical_operand" "")
++   (match_operand:VEC_I 2 "vlogical_operand" "")
++   (match_operand:VEC_I 3 "vlogical_operand" "")]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "
++{
++  emit_insn (gen_altivec_vsel<mode> (operands[0], operands[3],
++				     operands[2], operands[1]));
++  DONE;
++}")
++
++
++;; Vector logical instructions
++(define_expand "xor<mode>3"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (xor:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
++		   (match_operand:VEC_L 2 "vlogical_operand" "")))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "ior<mode>3"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
++		   (match_operand:VEC_L 2 "vlogical_operand" "")))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "and<mode>3"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (and:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
++		   (match_operand:VEC_L 2 "vlogical_operand" "")))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "one_cmpl<mode>2"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (not:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++  
++(define_expand "nor<mode>3"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (not:VEC_L (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
++			      (match_operand:VEC_L 2 "vlogical_operand" ""))))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++(define_expand "andc<mode>3"
++  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
++        (and:VEC_L (not:VEC_L (match_operand:VEC_L 2 "vlogical_operand" ""))
++		   (match_operand:VEC_L 1 "vlogical_operand" "")))]
++  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "")
++
++;; Same size conversions
++(define_expand "float<VEC_int><mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_vcfsx (operands[0], operands[1], const0_rtx));
++      DONE;
++    }
++}")
++
++(define_expand "unsigned_float<VEC_int><mode>2"
++  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
++	(unsigned_float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_vcfux (operands[0], operands[1], const0_rtx));
++      DONE;
++    }
++}")
++
++(define_expand "fix_trunc<mode><VEC_int>2"
++  [(set (match_operand:<VEC_INT> 0 "vint_operand" "")
++	(fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_vctsxs (operands[0], operands[1], const0_rtx));
++      DONE;
++    }
++}")
++
++(define_expand "fixuns_trunc<mode><VEC_int>2"
++  [(set (match_operand:<VEC_INT> 0 "vint_operand" "")
++	(unsigned_fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++  "
++{
++  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
++    {
++      emit_insn (gen_altivec_vctuxs (operands[0], operands[1], const0_rtx));
++      DONE;
++    }
++}")
++
++
++;; Vector initialization, set, extract
++(define_expand "vec_init<mode>"
++  [(match_operand:VEC_C 0 "vlogical_operand" "")
++   (match_operand:VEC_C 1 "vec_init_operand" "")]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++{
++  rs6000_expand_vector_init (operands[0], operands[1]);
++  DONE;
++})
++
++(define_expand "vec_set<mode>"
++  [(match_operand:VEC_C 0 "vlogical_operand" "")
++   (match_operand:<VEC_base> 1 "register_operand" "")
++   (match_operand 2 "const_int_operand" "")]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++{
++  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
++  DONE;
++})
++
++(define_expand "vec_extract<mode>"
++  [(match_operand:<VEC_base> 0 "register_operand" "")
++   (match_operand:VEC_C 1 "vlogical_operand" "")
++   (match_operand 2 "const_int_operand" "")]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
++{
++  rs6000_expand_vector_extract (operands[0], operands[1],
++				INTVAL (operands[2]));
++  DONE;
++})
++
++;; Interleave patterns
++(define_expand "vec_interleave_highv4sf"
++  [(set (match_operand:V4SF 0 "vfloat_operand" "")
++        (vec_merge:V4SF
++	 (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "")
++			  (parallel [(const_int 0)
++				     (const_int 2)
++				     (const_int 1)
++				     (const_int 3)]))
++	 (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "")
++			  (parallel [(const_int 2)
++				     (const_int 0)
++				     (const_int 3)
++				     (const_int 1)]))
++	 (const_int 5)))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
++  "")
++
++(define_expand "vec_interleave_lowv4sf"
++  [(set (match_operand:V4SF 0 "vfloat_operand" "")
++        (vec_merge:V4SF
++	 (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "")
++			  (parallel [(const_int 2)
++				     (const_int 0)
++				     (const_int 3)
++				     (const_int 1)]))
++	 (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "")
++			  (parallel [(const_int 0)
++				     (const_int 2)
++				     (const_int 1)
++				     (const_int 3)]))
++	 (const_int 5)))]
++  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
++  "")
++
++(define_expand "vec_interleave_highv2df"
++  [(set (match_operand:V2DF 0 "vfloat_operand" "")
++	(vec_concat:V2DF
++	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
++			(parallel [(const_int 0)]))
++	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
++			(parallel [(const_int 0)]))))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "")
++
++(define_expand "vec_interleave_lowv2df"
++  [(set (match_operand:V2DF 0 "vfloat_operand" "")
++	(vec_concat:V2DF
++	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
++			(parallel [(const_int 1)]))
++	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
++			(parallel [(const_int 1)]))))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "")
++
++;; For 2 element vectors, even/odd is the same as high/low
++(define_expand "vec_extract_evenv2df"
++  [(set (match_operand:V2DF 0 "vfloat_operand" "")
++	(vec_concat:V2DF
++	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
++			(parallel [(const_int 0)]))
++	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
++			(parallel [(const_int 0)]))))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "")
++
++(define_expand "vec_extract_oddv2df"
++  [(set (match_operand:V2DF 0 "vfloat_operand" "")
++	(vec_concat:V2DF
++	 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "")
++			(parallel [(const_int 1)]))
++	 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "")
++			(parallel [(const_int 1)]))))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "")
+--- gcc/config/rs6000/spe.md	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/spe.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -99,7 +99,7 @@ (define_insn "*divsf3_gpr"
+ 
+ ;; Floating point conversion instructions.
+ 
+-(define_insn "fixuns_truncdfsi2"
++(define_insn "spe_fixuns_truncdfsi2"
+   [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ 	(unsigned_fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+   "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+--- gcc/config/rs6000/constraints.md	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/constraints.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -17,6 +17,8 @@
+ ;; along with GCC; see the file COPYING3.  If not see
+ ;; <http://www.gnu.org/licenses/>.
+ 
++;; Available constraint letters: "e", "k", "u", "A", "B", "C", "D"
++
+ ;; Register constraints
+ 
+ (define_register_constraint "f" "TARGET_HARD_FLOAT && TARGET_FPRS
+@@ -50,6 +52,23 @@ (define_register_constraint "y" "CR_REGS
+ (define_register_constraint "z" "XER_REGS"
+   "@internal")
+ 
++;; Use w as a prefix to add VSX modes
++;; vector double (V2DF)
++(define_register_constraint "wd" "rs6000_vector_reg_class[V2DFmode]"
++  "@internal")
++
++;; vector float (V4SF)
++(define_register_constraint "wf" "rs6000_vector_reg_class[V4SFmode]"
++  "@internal")
++
++;; scalar double (DF)
++(define_register_constraint "ws" "rs6000_vector_reg_class[DFmode]"
++  "@internal")
++
++;; any VSX register
++(define_register_constraint "wa" "rs6000_vsx_reg_class"
++  "@internal")
++
+ ;; Integer constraints
+ 
+ (define_constraint "I"
+@@ -159,3 +178,7 @@ (define_constraint "t"
+ (define_constraint "W"
+   "vector constant that does not require memory"
+   (match_operand 0 "easy_vector_constant"))
++
++(define_constraint "j"
++  "Zero vector constant"
++  (match_test "(op == const0_rtx || op == CONST0_RTX (GET_MODE (op)))"))
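+
The new "w"-prefixed register constraints are also usable from inline
asm once VSX is enabled.  A hypothetical sketch (not part of the patch;
the "%x" operand modifier for full 6-bit VSX register numbers is
assumed to come with the rest of the VSX support, outside this
excerpt):

    typedef double v2df __attribute__ ((vector_size (16)));

    v2df
    vsx_add (v2df a, v2df b)
    {
      v2df r;
      /* "wa" asks for any VSX register, per the constraint above.  */
      __asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (r) : "wa" (a), "wa" (b));
      return r;
    }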
+--- gcc/config/rs6000/predicates.md	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/predicates.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -38,6 +38,37 @@ (define_predicate "altivec_register_oper
+ 		     || ALTIVEC_REGNO_P (REGNO (op))
+ 		     || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+ 
++;; Return 1 if op is a VSX register.
++(define_predicate "vsx_register_operand"
++   (and (match_operand 0 "register_operand")
++	(match_test "GET_CODE (op) != REG
++		     || VSX_REGNO_P (REGNO (op))
++		     || REGNO (op) > LAST_VIRTUAL_REGISTER")))
++
++;; Return 1 if op is a vector register that operates on floating point vectors
++;; (either altivec or VSX).
++(define_predicate "vfloat_operand"
++   (and (match_operand 0 "register_operand")
++	(match_test "GET_CODE (op) != REG
++		     || VFLOAT_REGNO_P (REGNO (op))
++		     || REGNO (op) > LAST_VIRTUAL_REGISTER")))
++
++;; Return 1 if op is a vector register that operates on integer vectors
++;; (only altivec, VSX doesn't support integer vectors)
++(define_predicate "vint_operand"
++   (and (match_operand 0 "register_operand")
++	(match_test "GET_CODE (op) != REG
++		     || VINT_REGNO_P (REGNO (op))
++		     || REGNO (op) > LAST_VIRTUAL_REGISTER")))
++
++;; Return 1 if op is a vector register to do logical operations on (and, or,
++;; xor, etc.)
++(define_predicate "vlogical_operand"
++   (and (match_operand 0 "register_operand")
++	(match_test "GET_CODE (op) != REG
++		     || VLOGICAL_REGNO_P (REGNO (op))
++		     || REGNO (op) > LAST_VIRTUAL_REGISTER")))
++
+ ;; Return 1 if op is XER register.
+ (define_predicate "xer_operand"
+   (and (match_code "reg")
+@@ -234,6 +265,10 @@ (define_predicate "easy_fp_constant"
+ 	      && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1);
+ 
+     case DFmode:
++      /* The constant 0.0 is easy under VSX.  */
++      if (op == CONST0_RTX (DFmode) && VECTOR_UNIT_VSX_P (DFmode))
++	return 1;
++
+       /* Force constants to memory before reload to utilize
+ 	 compress_float_constant.
+ 	 Avoid this when flag_unsafe_math_optimizations is enabled
+@@ -396,13 +431,16 @@ (define_predicate "indexed_or_indirect_o
+   (match_code "mem")
+ {
+   op = XEXP (op, 0);
+-  if (TARGET_ALTIVEC
+-      && ALTIVEC_VECTOR_MODE (mode)
++  if (VECTOR_MEM_ALTIVEC_P (mode)
+       && GET_CODE (op) == AND
+       && GET_CODE (XEXP (op, 1)) == CONST_INT
+       && INTVAL (XEXP (op, 1)) == -16)
+     op = XEXP (op, 0);
+ 
++  else if (VECTOR_MEM_VSX_P (mode)
++	   && GET_CODE (op) == PRE_MODIFY)
++    op = XEXP (op, 1);
++
+   return indexed_or_indirect_address (op, mode);
+ })
+ 
+@@ -1336,3 +1374,19 @@ (define_predicate "stmw_operation"
+ 
+   return 1;
+ })
++
++;; Return true if the operand is a legitimate parallel for vec_init
++(define_predicate "vec_init_operand"
++  (match_code "parallel")
++{
++  /* Disallow V2DF mode with MEMs unless both are the same under VSX.  */
++  if (mode == V2DFmode && VECTOR_UNIT_VSX_P (mode))
++    {
++      rtx op0 = XVECEXP (op, 0, 0);
++      rtx op1 = XVECEXP (op, 0, 1);
++      if ((MEM_P (op0) || MEM_P (op1)) && !rtx_equal_p (op0, op1))
++	return 0;
++    }
++
++  return 1;
++})
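+
The V2DF restriction in vec_init_operand above distinguishes a splat of
one memory location (both elements are the same MEM, so rtx_equal_p
holds) from an initializer built out of two different memory operands.
A C sketch of the two cases (illustrative only, not part of the patch):

    typedef double v2df __attribute__ ((vector_size (16)));

    v2df
    splat (const double *p)
    {
      return (v2df) { *p, *p };   /* identical MEMs: allowed */
    }

    v2df
    pair (const double *p, const double *q)
    {
      return (v2df) { *p, *q };   /* distinct MEMs: forced through registers */
    }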
+--- gcc/config/rs6000/ppc-asm.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/ppc-asm.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -63,7 +63,7 @@
+ #define f16	16
+ #define f17	17
+ #define f18	18
+-#define f19     19
++#define f19	19
+ #define f20	20
+ #define f21	21
+ #define f22	22
+@@ -77,6 +77,143 @@
+ #define f30	30
+ #define f31	31
+ 
++#ifdef __VSX__
++#define f32	32
++#define f33	33
++#define f34	34
++#define f35	35
++#define f36	36
++#define f37	37
++#define f38	38
++#define f39	39
++#define f40	40
++#define f41	41
++#define f42	42
++#define f43	43
++#define f44	44
++#define f45	45
++#define f46	46
++#define f47	47
++#define f48	48
++#define f49	49
++#define f50	50
++#define f51	51
++#define f52	52
++#define f53	53
++#define f54	54
++#define f55	55
++#define f56	56
++#define f57	57
++#define f58	58
++#define f59	59
++#define f60	60
++#define f61	61
++#define f62	62
++#define f63	63
++#endif
++
++#ifdef __ALTIVEC__
++#define v0	0
++#define v1	1
++#define v2	2
++#define v3	3
++#define v4	4
++#define v5	5
++#define v6	6
++#define v7	7
++#define v8	8
++#define v9	9
++#define v10	10
++#define v11	11
++#define v12	12
++#define v13	13
++#define v14	14
++#define v15	15
++#define v16	16
++#define v17	17
++#define v18	18
++#define v19	19
++#define v20	20
++#define v21	21
++#define v22	22
++#define v23	23
++#define v24	24
++#define v25	25
++#define v26	26
++#define v27	27
++#define v28	28
++#define v29	29
++#define v30	30
++#define v31	31
++#endif
++
++#ifdef __VSX__
++#define vs0	0
++#define vs1	1
++#define vs2	2
++#define vs3	3
++#define vs4	4
++#define vs5	5
++#define vs6	6
++#define vs7	7
++#define vs8	8
++#define vs9	9
++#define vs10	10
++#define vs11	11
++#define vs12	12
++#define vs13	13
++#define vs14	14
++#define vs15	15
++#define vs16	16
++#define vs17	17
++#define vs18	18
++#define vs19	19
++#define vs20	20
++#define vs21	21
++#define vs22	22
++#define vs23	23
++#define vs24	24
++#define vs25	25
++#define vs26	26
++#define vs27	27
++#define vs28	28
++#define vs29	29
++#define vs30	30
++#define vs31	31
++#define vs32	32
++#define vs33	33
++#define vs34	34
++#define vs35	35
++#define vs36	36
++#define vs37	37
++#define vs38	38
++#define vs39	39
++#define vs40	40
++#define vs41	41
++#define vs42	42
++#define vs43	43
++#define vs44	44
++#define vs45	45
++#define vs46	46
++#define vs47	47
++#define vs48	48
++#define vs49	49
++#define vs50	50
++#define vs51	51
++#define vs52	52
++#define vs53	53
++#define vs54	54
++#define vs55	55
++#define vs56	56
++#define vs57	57
++#define vs58	58
++#define vs59	59
++#define vs60	60
++#define vs61	61
++#define vs62	62
++#define vs63	63
++#endif
++
+ /*
+  * Macros to glue together two tokens.
+  */
+--- gcc/config/rs6000/linux64.opt	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/linux64.opt	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -20,5 +20,5 @@
+ ; <http://www.gnu.org/licenses/>.
+ 
+ mprofile-kernel
+-Target Report Mask(PROFILE_KERNEL)
++Target Report Var(TARGET_PROFILE_KERNEL)
+ Call mcount for profiling before a function prologue
+--- gcc/config/rs6000/sysv4.opt	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/sysv4.opt	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -32,7 +32,7 @@ Target RejectNegative Joined
+ Specify bit size of immediate TLS offsets
+ 
+ mbit-align
+-Target Report Mask(NO_BITFIELD_TYPE)
++Target Report Var(TARGET_NO_BITFIELD_TYPE)
+ Align to the base type of the bit-field
+ 
+ mstrict-align
+@@ -87,11 +87,11 @@ Target Report Mask(EABI)
+ Use EABI
+ 
+ mbit-word
+-Target Report Mask(NO_BITFIELD_WORD)
++Target Report Var(TARGET_NO_BITFIELD_WORD)
+ Allow bit-fields to cross word boundaries
+ 
+ mregnames
+-Target Mask(REGNAMES)
++Target Var(TARGET_REGNAMES)
+ Use alternate register names
+ 
+ ;; FIXME: Does nothing.
+--- gcc/config/rs6000/rs6000-protos.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000-protos.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -64,9 +64,14 @@ extern int insvdi_rshift_rlwimi_p (rtx, 
+ extern int registers_ok_for_quad_peep (rtx, rtx);
+ extern int mems_ok_for_quad_peep (rtx, rtx);
+ extern bool gpr_or_gpr_p (rtx, rtx);
++extern enum reg_class rs6000_preferred_reload_class(rtx, enum reg_class);
+ extern enum reg_class rs6000_secondary_reload_class (enum reg_class,
+ 						     enum machine_mode, rtx);
+-
++extern bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
++					    enum machine_mode);
++extern bool rs6000_cannot_change_mode_class (enum machine_mode,
++					     enum machine_mode,
++					     enum reg_class);
+ extern int paired_emit_vector_cond_expr (rtx, rtx, rtx,
+                                          rtx, rtx, rtx);
+ extern void paired_expand_vector_move (rtx operands[]);
+@@ -170,7 +175,6 @@ extern int rs6000_register_move_cost (en
+ 				      enum reg_class, enum reg_class);
+ extern int rs6000_memory_move_cost (enum machine_mode, enum reg_class, int);
+ extern bool rs6000_tls_referenced_p (rtx);
+-extern int rs6000_hard_regno_nregs (int, enum machine_mode);
+ extern void rs6000_conditional_register_usage (void);
+ 
+ /* Declare functions in rs6000-c.c */
+@@ -189,4 +193,6 @@ const char * rs6000_xcoff_strip_dollar (
+ void rs6000_final_prescan_insn (rtx, rtx *operand, int num_operands);
+ 
+ extern bool rs6000_hard_regno_mode_ok_p[][FIRST_PSEUDO_REGISTER];
++extern unsigned char rs6000_class_max_nregs[][LIM_REG_CLASSES];
++extern unsigned char rs6000_hard_regno_nregs[][FIRST_PSEUDO_REGISTER];
+ #endif  /* rs6000-protos.h */
+--- gcc/config/rs6000/t-rs6000	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/t-rs6000	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -16,3 +16,33 @@ rs6000-c.o: $(srcdir)/config/rs6000/rs60
+ 
+ # The rs6000 backend doesn't cause warnings in these files.
+ insn-conditions.o-warn =
++
++MD_INCLUDES = $(srcdir)/config/rs6000/rios1.md \
++	$(srcdir)/config/rs6000/rios2.md \
++	$(srcdir)/config/rs6000/rs64.md \
++	$(srcdir)/config/rs6000/mpc.md \
++	$(srcdir)/config/rs6000/40x.md \
++	$(srcdir)/config/rs6000/440.md \
++	$(srcdir)/config/rs6000/603.md \
++	$(srcdir)/config/rs6000/6xx.md \
++	$(srcdir)/config/rs6000/7xx.md \
++	$(srcdir)/config/rs6000/7450.md \
++	$(srcdir)/config/rs6000/8540.md \
++	$(srcdir)/config/rs6000/e300c2c3.md \
++	$(srcdir)/config/rs6000/e500mc.md \
++	$(srcdir)/config/rs6000/power4.md \
++	$(srcdir)/config/rs6000/power5.md \
++	$(srcdir)/config/rs6000/power6.md \
++	$(srcdir)/config/rs6000/power7.md \
++	$(srcdir)/config/rs6000/cell.md \
++	$(srcdir)/config/rs6000/xfpu.md \
++	$(srcdir)/config/rs6000/predicates.md \
++	$(srcdir)/config/rs6000/constraints.md \
++	$(srcdir)/config/rs6000/darwin.md \
++	$(srcdir)/config/rs6000/sync.md \
++	$(srcdir)/config/rs6000/vector.md \
++	$(srcdir)/config/rs6000/vsx.md \
++	$(srcdir)/config/rs6000/altivec.md \
++	$(srcdir)/config/rs6000/spe.md \
++	$(srcdir)/config/rs6000/dfp.md \
++	$(srcdir)/config/rs6000/paired.md
+--- gcc/config/rs6000/power7.md	(.../trunk)	(revision 0)
++++ gcc/config/rs6000/power7.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,320 @@
++;; Scheduling description for IBM POWER7 processor.
++;; Copyright (C) 2009 Free Software Foundation, Inc.
++;;
++;; Contributed by Pat Haugen (pthaugen at us.ibm.com).
++
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++;; License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_automaton "power7iu,power7lsu,power7vsu,power7misc")
++
++(define_cpu_unit "iu1_power7,iu2_power7" "power7iu")
++(define_cpu_unit "lsu1_power7,lsu2_power7" "power7lsu")
++(define_cpu_unit "vsu1_power7,vsu2_power7" "power7vsu")
++(define_cpu_unit "bpu_power7,cru_power7" "power7misc")
++(define_cpu_unit "du1_power7,du2_power7,du3_power7,du4_power7,du5_power7"
++                 "power7misc")
++
++
++(define_reservation "DU_power7"
++		    "du1_power7|du2_power7|du3_power7|du4_power7")
++
++(define_reservation "DU2F_power7"
++		    "du1_power7+du2_power7")
++
++(define_reservation "DU4_power7"
++		    "du1_power7+du2_power7+du3_power7+du4_power7")
++
++(define_reservation "FXU_power7"
++                    "iu1_power7|iu2_power7")
++
++(define_reservation "VSU_power7"
++                    "vsu1_power7|vsu2_power7")
++
++(define_reservation "LSU_power7"
++                    "lsu1_power7|lsu2_power7")
++
++
++; Dispatch slots are allocated in program order.
++(absence_set "du1_power7" "du2_power7,du3_power7,du4_power7,du5_power7")
++(absence_set "du2_power7" "du3_power7,du4_power7,du5_power7")
++(absence_set "du3_power7" "du4_power7,du5_power7")
++(absence_set "du4_power7" "du5_power7")
++
++
++; LS Unit
++(define_insn_reservation "power7-load" 2
++  (and (eq_attr "type" "load")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7")
++
++(define_insn_reservation "power7-load-ext" 3
++  (and (eq_attr "type" "load_ext")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,LSU_power7,FXU_power7")
++
++(define_insn_reservation "power7-load-update" 2
++  (and (eq_attr "type" "load_u")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,LSU_power7+FXU_power7")
++
++(define_insn_reservation "power7-load-update-indexed" 3
++  (and (eq_attr "type" "load_ux")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,FXU_power7,LSU_power7+FXU_power7")
++
++(define_insn_reservation "power7-load-ext-update" 4
++  (and (eq_attr "type" "load_ext_u")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,LSU_power7+FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-load-ext-update-indexed" 4
++  (and (eq_attr "type" "load_ext_ux")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,FXU_power7,LSU_power7+FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-fpload" 3
++  (and (eq_attr "type" "fpload")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7")
++
++(define_insn_reservation "power7-fpload-update" 3
++  (and (eq_attr "type" "fpload_u,fpload_ux")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,LSU_power7+FXU_power7")
++
++(define_insn_reservation "power7-store" 6 ; store-forwarding latency
++  (and (eq_attr "type" "store")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7+FXU_power7")
++
++(define_insn_reservation "power7-store-update" 6
++  (and (eq_attr "type" "store_u")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,LSU_power7+FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-store-update-indexed" 6
++  (and (eq_attr "type" "store_ux")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,LSU_power7+FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-fpstore" 6
++  (and (eq_attr "type" "fpstore")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7+VSU_power7")
++
++(define_insn_reservation "power7-fpstore-update" 6
++  (and (eq_attr "type" "fpstore_u,fpstore_ux")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7+VSU_power7+FXU_power7")
++
++(define_insn_reservation "power7-larx" 3
++  (and (eq_attr "type" "load_l")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,LSU_power7")
++
++(define_insn_reservation "power7-stcx" 10
++  (and (eq_attr "type" "store_c")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,LSU_power7")
++
++(define_insn_reservation "power7-vecload" 3
++  (and (eq_attr "type" "vecload")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7")
++
++(define_insn_reservation "power7-vecstore" 6
++  (and (eq_attr "type" "vecstore")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,LSU_power7+VSU_power7")
++
++(define_insn_reservation "power7-sync" 11
++  (and (eq_attr "type" "sync")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,LSU_power7")
++
++
++; FX Unit
++(define_insn_reservation "power7-integer" 1
++  (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
++                        var_shift_rotate,exts")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,FXU_power7")
++
++(define_insn_reservation "power7-cntlz" 2
++  (and (eq_attr "type" "cntlz")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,FXU_power7")
++
++(define_insn_reservation "power7-two" 2
++  (and (eq_attr "type" "two")
++       (eq_attr "cpu" "power7"))
++  "DU_power7+DU_power7,FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-three" 3
++  (and (eq_attr "type" "three")
++       (eq_attr "cpu" "power7"))
++  "DU_power7+DU_power7+DU_power7,FXU_power7,FXU_power7,FXU_power7")
++
++(define_insn_reservation "power7-cmp" 1
++  (and (eq_attr "type" "cmp,fast_compare")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,FXU_power7")
++
++(define_insn_reservation "power7-compare" 2
++  (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,FXU_power7,FXU_power7")
++
++(define_bypass 3 "power7-cmp,power7-compare" "power7-crlogical,power7-delayedcr")
++
++(define_insn_reservation "power7-mul" 4
++  (and (eq_attr "type" "imul,imul2,imul3,lmul")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,FXU_power7")
++
++(define_insn_reservation "power7-mul-compare" 5
++  (and (eq_attr "type" "imul_compare,lmul_compare")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,FXU_power7,nothing*3,FXU_power7")
++
++(define_insn_reservation "power7-idiv" 36
++  (and (eq_attr "type" "idiv")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,iu1_power7*36|iu2_power7*36")
++
++(define_insn_reservation "power7-ldiv" 68
++  (and (eq_attr "type" "ldiv")
++       (eq_attr "cpu" "power7"))
++  "DU2F_power7,iu1_power7*68|iu2_power7*68")
++
++(define_insn_reservation "power7-isync" 1 ;
++  (and (eq_attr "type" "isync")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,FXU_power7")
++
++
++; CR Unit
++(define_insn_reservation "power7-mtjmpr" 4
++  (and (eq_attr "type" "mtjmpr")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,FXU_power7")
++
++(define_insn_reservation "power7-mfjmpr" 5
++  (and (eq_attr "type" "mfjmpr")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,cru_power7+FXU_power7")
++
++(define_insn_reservation "power7-crlogical" 3
++  (and (eq_attr "type" "cr_logical")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,cru_power7")
++
++(define_insn_reservation "power7-delayedcr" 3
++  (and (eq_attr "type" "delayed_cr")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,cru_power7")
++
++(define_insn_reservation "power7-mfcr" 6
++  (and (eq_attr "type" "mfcr")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,cru_power7")
++
++(define_insn_reservation "power7-mfcrf" 3
++  (and (eq_attr "type" "mfcrf")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,cru_power7")
++
++(define_insn_reservation "power7-mtcr" 3
++  (and (eq_attr "type" "mtcr")
++       (eq_attr "cpu" "power7"))
++  "DU4_power7,cru_power7+FXU_power7")
++
++
++; BR Unit
++; Branches take dispatch slot 4.  The absence_sets above prevent other insns
++; from grabbing previous dispatch slots once this one is assigned.
++(define_insn_reservation "power7-branch" 3
++  (and (eq_attr "type" "jmpreg,branch")
++       (eq_attr "cpu" "power7"))
++  "(du5_power7\
++   |du4_power7+du5_power7\
++   |du3_power7+du4_power7+du5_power7\
++   |du2_power7+du3_power7+du4_power7+du5_power7\
++   |du1_power7+du2_power7+du3_power7+du4_power7+du5_power7),bpu_power7")
++
++
++; VS Unit (includes FP/VSX/VMX/DFP)
++(define_insn_reservation "power7-fp" 6
++  (and (eq_attr "type" "fp,dmul")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_bypass 8 "power7-fp" "power7-branch")
++
++(define_insn_reservation "power7-fpcompare" 4
++  (and (eq_attr "type" "fpcompare")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_insn_reservation "power7-sdiv" 26
++  (and (eq_attr "type" "sdiv")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_insn_reservation "power7-ddiv" 32
++  (and (eq_attr "type" "ddiv")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_insn_reservation "power7-sqrt" 31
++  (and (eq_attr "type" "ssqrt")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_insn_reservation "power7-dsqrt" 43
++  (and (eq_attr "type" "dsqrt")
++       (eq_attr "cpu" "power7"))
++  "DU_power7,VSU_power7")
++
++(define_insn_reservation "power7-vecsimple" 2
++  (and (eq_attr "type" "vecsimple")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,VSU_power7")
++
++(define_insn_reservation "power7-veccmp" 7
++  (and (eq_attr "type" "veccmp")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,VSU_power7")
++
++(define_insn_reservation "power7-vecfloat" 7
++  (and (eq_attr "type" "vecfloat")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,VSU_power7")
++
++(define_bypass 6 "power7-vecfloat" "power7-vecfloat")
++
++(define_insn_reservation "power7-veccomplex" 7
++  (and (eq_attr "type" "veccomplex")
++       (eq_attr "cpu" "power7"))
++  "du1_power7,VSU_power7")
++
++(define_insn_reservation "power7-vecperm" 3
++  (and (eq_attr "type" "vecperm")
++       (eq_attr "cpu" "power7"))
++  "du2_power7,VSU_power7")
++
++
+--- gcc/config/rs6000/rs6000-c.c	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000-c.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -265,6 +265,8 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfi
+     builtin_define ("_ARCH_PWR6X");
+   if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
+     builtin_define ("_ARCH_COM");
++  if (TARGET_POPCNTD)
++    builtin_define ("_ARCH_PWR7");
+   if (TARGET_ALTIVEC)
+     {
+       builtin_define ("__ALTIVEC__");
+@@ -306,6 +308,8 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfi
+   /* Used by libstdc++.  */
+   if (TARGET_NO_LWSYNC)
+     builtin_define ("__NO_LWSYNC__");
++  if (TARGET_VSX)
++    builtin_define ("__VSX__");
+ 
+   /* May be overridden by target configuration.  */
+   RS6000_CPU_CPP_ENDIAN_BUILTINS();
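+
The new _ARCH_PWR7 and __VSX__ predefines let source code select VSX
paths at compile time.  A minimal sketch (illustrative, not part of the
patch):

    #ifdef __VSX__
    typedef double v2df __attribute__ ((vector_size (16)));
    static inline v2df
    vadd2 (v2df a, v2df b)
    {
      return a + b;               /* vector add on the VSX unit */
    }
    #else
    static inline void
    vadd2 (double *r, const double *a, const double *b)
    {
      r[0] = a[0] + b[0];         /* scalar fallback */
      r[1] = a[1] + b[1];
    }
    #endif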
+--- gcc/config/rs6000/rs6000.opt	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000.opt	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -111,24 +111,44 @@ mhard-float
+ Target Report RejectNegative InverseMask(SOFT_FLOAT, HARD_FLOAT)
+ Use hardware floating point
+ 
+-mno-update
+-Target Report RejectNegative Mask(NO_UPDATE)
+-Do not generate load/store with update instructions
++mpopcntd
++Target Report Mask(POPCNTD)
++Use PowerPC V2.06 popcntd instruction
++
++mvsx
++Target Report Mask(VSX)
++Use vector/scalar (VSX) instructions
++
++mvsx-vector-memory
++Target Report Var(TARGET_VSX_VECTOR_MEMORY) Init(-1)
++If -mvsx, use VSX vector load/store instructions instead of Altivec instructions
++
++mvsx-vector-float
++Target Report Var(TARGET_VSX_VECTOR_FLOAT) Init(-1)
++If -mvsx, use VSX arithmetic instructions for float vectors (on by default)
++
++mvsx-vector-double
++Target Report Var(TARGET_VSX_VECTOR_DOUBLE) Init(-1)
++If -mvsx, use VSX arithmetic instructions for double vectors (on by default)
++
++mvsx-scalar-double
++Target Report Var(TARGET_VSX_SCALAR_DOUBLE) Init(-1)
++If -mvsx, use VSX arithmetic instructions for scalar double (on by default)
++
++mvsx-scalar-memory
++Target Report Var(TARGET_VSX_SCALAR_MEMORY)
++If -mvsx, use VSX scalar memory reference instructions for scalar double (off by default)
+ 
+ mupdate
+-Target Report RejectNegative InverseMask(NO_UPDATE, UPDATE)
++Target Report Var(TARGET_UPDATE) Init(1)
+ Generate load/store with update instructions
+ 
+ mavoid-indexed-addresses
+ Target Report Var(TARGET_AVOID_XFORM) Init(-1)
+ Avoid generation of indexed load/store instructions when possible
+ 
+-mno-fused-madd
+-Target Report RejectNegative Mask(NO_FUSED_MADD)
+-Do not generate fused multiply/add instructions
+-
+ mfused-madd
+-Target Report RejectNegative InverseMask(NO_FUSED_MADD, FUSED_MADD)
++Target Report Var(TARGET_FUSED_MADD) Init(1)
+ Generate fused multiply/add instructions
+ 
+ msched-prolog
+@@ -194,7 +214,7 @@ Target RejectNegative Joined
+ -mvrsave=yes/no	Deprecated option.  Use -mvrsave/-mno-vrsave instead
+ 
+ misel
+-Target
++Target Report Mask(ISEL)
+ Generate isel instructions
+ 
+ misel=
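+
-mpopcntd (set by -mcpu=power7 in the cpu table in rs6000.c below)
advertises the PowerPC V2.06 popcntd instruction.  A sketch of the kind
of code that benefits (illustrative only; whether __builtin_popcountll
actually expands to popcntd depends on insn patterns outside this
excerpt):

    int
    bits_set (unsigned long long x)
    {
      return __builtin_popcountll (x);
    }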
+--- gcc/config/rs6000/linux64.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/linux64.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -114,7 +114,7 @@ extern int dot_symbols;
+ 	    error (INVALID_32BIT, "32");			\
+ 	  if (TARGET_PROFILE_KERNEL)				\
+ 	    {							\
+-	      target_flags &= ~MASK_PROFILE_KERNEL;		\
++	      SET_PROFILE_KERNEL (0);				\
+ 	      error (INVALID_32BIT, "profile-kernel");		\
+ 	    }							\
+ 	}							\
+--- gcc/config/rs6000/rs6000.c	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -178,9 +178,6 @@ int rs6000_spe;
+ /* Nonzero if we want SPE ABI extensions.  */
+ int rs6000_spe_abi;
+ 
+-/* Nonzero to use isel instructions.  */
+-int rs6000_isel;
+-
+ /* Nonzero if floating point operations are done in the GPRs.  */
+ int rs6000_float_gprs = 0;
+ 
+@@ -227,10 +224,18 @@ int dot_symbols;
+ const char *rs6000_debug_name;
+ int rs6000_debug_stack;		/* debug stack applications */
+ int rs6000_debug_arg;		/* debug argument handling */
++int rs6000_debug_reg;		/* debug register classes */
++int rs6000_debug_addr;		/* debug memory addressing */
+ 
+ /* Value is TRUE if register/mode pair is acceptable.  */
+ bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+ 
++/* Maximum number of registers needed for a given register class and mode.  */
++unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
++
++/* How many registers are needed for a given hard register and mode.  */
++unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
++
+ /* Built in types.  */
+ 
+ tree rs6000_builtin_types[RS6000_BTI_MAX];
+@@ -270,7 +275,6 @@ struct {
+   bool altivec_abi;		/* True if -mabi=altivec/no-altivec used.  */
+   bool spe;			/* True if -mspe= was used.  */
+   bool float_gprs;		/* True if -mfloat-gprs= was used.  */
+-  bool isel;			/* True if -misel was used. */
+   bool long_double;	        /* True if -mlong-double- was used.  */
+   bool ieee;			/* True if -mabi=ieee/ibmlongdouble used.  */
+   bool vrsave;			/* True if -mvrsave was used.  */
+@@ -286,6 +290,18 @@ struct builtin_description
+   const char *const name;
+   const enum rs6000_builtins code;
+ };
++
++/* Describe the vector unit used for modes.  */
++enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
++enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
++enum reg_class rs6000_vector_reg_class[NUM_MACHINE_MODES];
++
++/* Describe the alignment of a vector.  */
++int rs6000_vector_align[NUM_MACHINE_MODES];
++
++/* Describe the register classes used by VSX instructions.  */
++enum reg_class rs6000_vsx_reg_class = NO_REGS;
++
+ 
+ /* Target cpu costs.  */
+ 
+@@ -749,6 +765,25 @@ struct processor_costs power6_cost = {
+   16,			/* prefetch streams */
+ };
+ 
++/* Instruction costs on POWER7 processors.  */
++static const
++struct processor_costs power7_cost = {
++  COSTS_N_INSNS (2),	/* mulsi */
++  COSTS_N_INSNS (2),	/* mulsi_const */
++  COSTS_N_INSNS (2),	/* mulsi_const9 */
++  COSTS_N_INSNS (2),	/* muldi */
++  COSTS_N_INSNS (18),	/* divsi */
++  COSTS_N_INSNS (34),	/* divdi */
++  COSTS_N_INSNS (3),	/* fp */
++  COSTS_N_INSNS (3),	/* dmul */
++  COSTS_N_INSNS (13),	/* sdiv */
++  COSTS_N_INSNS (16),	/* ddiv */
++  128,			/* cache line size */
++  32,			/* l1 cache */
++  256,			/* l2 cache */
++  12,			/* prefetch streams */
++};
++
+ 
+ static bool rs6000_function_ok_for_sibcall (tree, tree);
+ static const char *rs6000_invalid_within_doloop (const_rtx);
+@@ -963,12 +998,10 @@ static tree rs6000_gimplify_va_arg (tree
+ static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
+ static bool rs6000_scalar_mode_supported_p (enum machine_mode);
+ static bool rs6000_vector_mode_supported_p (enum machine_mode);
+-static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
+-			     enum machine_mode);
++static rtx rs6000_emit_vector_compare_vsx (enum rtx_code, rtx, rtx, rtx);
++static rtx rs6000_emit_vector_compare_altivec (enum rtx_code, rtx, rtx, rtx);
+ static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
+ 				       enum machine_mode);
+-static int get_vsel_insn (enum machine_mode);
+-static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
+ static tree rs6000_stack_protect_fail (void);
+ 
+ const int INSN_NOT_AVAILABLE = -1;
+@@ -1045,6 +1078,9 @@ static const char alt_reg_names[][8] =
+ #endif
+ #ifndef TARGET_PROFILE_KERNEL
+ #define TARGET_PROFILE_KERNEL 0
++#define SET_PROFILE_KERNEL(N)
++#else
++#define SET_PROFILE_KERNEL(N) TARGET_PROFILE_KERNEL = (N)
+ #endif
+ 
+ /* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
+@@ -1299,28 +1335,96 @@ static const char alt_reg_names[][8] =
+ 
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
++/* Return number of consecutive hard regs needed starting at reg REGNO
++   to hold something of mode MODE.
++   This is ordinarily the length in words of a value of mode MODE
++   but can be less for certain modes in special long registers.
++
++   For the SPE, GPRs are 64 bits but only 32 bits are visible in
++   scalar instructions.  The upper 32 bits are only available to the
++   SIMD instructions.
++
++   POWER and PowerPC GPRs hold 32 bits worth;
++   PowerPC64 GPRs and FPRs hold 64 bits worth.  */
++
++static int
++rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
++{
++  unsigned HOST_WIDE_INT reg_size;
++
++  if (FP_REGNO_P (regno))
++    reg_size = (VECTOR_UNIT_VSX_P (mode)
++		? UNITS_PER_VSX_WORD
++		: UNITS_PER_FP_WORD);
++
++  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
++    reg_size = UNITS_PER_SPE_WORD;
++
++  else if (ALTIVEC_REGNO_P (regno))
++    reg_size = UNITS_PER_ALTIVEC_WORD;
++
++  /* The value returned for SCmode in the E500 double case is 2 for
++     ABI compatibility; storing an SCmode value in a single register
++     would require function_arg and rs6000_spe_function_arg to handle
++     SCmode so as to pass the value correctly in a pair of
++     registers.  */
++  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
++	   && !DECIMAL_FLOAT_MODE_P (mode))
++    reg_size = UNITS_PER_FP_WORD;
++
++  else
++    reg_size = UNITS_PER_WORD;
++
++  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
++}
+ 
+ /* Value is 1 if hard register REGNO can hold a value of machine-mode
+    MODE.  */
+ static int
+ rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
+ {
++  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
++
++  /* VSX registers that overlap the FPR registers are larger than on non-VSX
++     implementations.  Don't allow an item to be split between an FP register
++     and an Altivec register.  */
++  if (VECTOR_UNIT_VSX_P (mode) || VECTOR_MEM_VSX_P (mode))
++    {
++      enum reg_class rclass = rs6000_vector_reg_class[mode];
++      if (FP_REGNO_P (regno))
++	return ((rclass == FLOAT_REGS || rclass == VSX_REGS)
++		&& FP_REGNO_P (last_regno));
++
++      if (ALTIVEC_REGNO_P (regno))
++	return ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
++		&& ALTIVEC_REGNO_P (last_regno));
++    }
++
+   /* The GPRs can hold any mode, but values bigger than one register
+      cannot go past R31.  */
+   if (INT_REGNO_P (regno))
+-    return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
++    return INT_REGNO_P (last_regno);
+ 
+-  /* The float registers can only hold floating modes and DImode.
+-     This excludes the 32-bit decimal float mode for now.  */
++  /* The float registers (except for VSX vector modes) can only hold floating
++     modes and DImode.  This excludes the 32-bit decimal float mode for
++     now.  */
+   if (FP_REGNO_P (regno))
+-    return
+-      ((SCALAR_FLOAT_MODE_P (mode)
+-       && (mode != TDmode || (regno % 2) == 0)
+-       && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
+-      || (GET_MODE_CLASS (mode) == MODE_INT
++    {
++      if (SCALAR_FLOAT_MODE_P (mode)
++	  && (mode != TDmode || (regno % 2) == 0)
++	  && FP_REGNO_P (last_regno))
++	return 1;
++
++      if (GET_MODE_CLASS (mode) == MODE_INT
+ 	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
+-      || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
+-           && PAIRED_VECTOR_MODE (mode)));
++	return 1;
++
++      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
++	  && PAIRED_VECTOR_MODE (mode))
++	return 1;
++
++      return 0;
++    }
+ 
+   /* The CR register can only hold CC modes.  */
+   if (CR_REGNO_P (regno))
+@@ -1331,28 +1435,312 @@ rs6000_hard_regno_mode_ok (int regno, en
+ 
+   /* AltiVec only in AltiVec registers.  */
+   if (ALTIVEC_REGNO_P (regno))
+-    return ALTIVEC_VECTOR_MODE (mode);
++    return VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode);
+ 
+   /* ...but GPRs can hold SIMD data on the SPE in one register.  */
+   if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+     return 1;
+ 
+-  /* We cannot put TImode anywhere except general register and it must be
+-     able to fit within the register set.  */
++  /* Don't allow anything but word-sized integers (aka pointers) in CTR/LR.
++     You really don't want to spill your floating point values to those
++     registers.  Also do this for the old MQ register on POWER.  */
++  if (regno == CTR_REGNO || regno == LR_REGNO || regno == MQ_REGNO)
++    return (GET_MODE_CLASS (mode) == MODE_INT
++	    && GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
++
++  /* The VRSAVE/VSCR registers are 32-bits (they are fixed, but add this for
++     completeness).  */
++  if (regno == VRSAVE_REGNO || regno == VSCR_REGNO)
++    return (mode == SImode);
++
++  /* We cannot put TImode anywhere except general register and it must be able
++     to fit within the register set.  In the future, allow TImode in the
++     Altivec or VSX registers.  */
+ 
+   return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
+ }
+ 
+-/* Initialize rs6000_hard_regno_mode_ok_p table.  */
++/* Print interesting facts about registers.  */
+ static void
+-rs6000_init_hard_regno_mode_ok (void)
++rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
+ {
+   int r, m;
+ 
++  for (r = first_regno; r <= last_regno; ++r)
++    {
++      const char *comma = "";
++      int len;
++
++      if (first_regno == last_regno)
++	fprintf (stderr, "%s:\t", reg_name);
++      else
++	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
++
++      len = 8;
++      for (m = 0; m < NUM_MACHINE_MODES; ++m)
++	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
++	  {
++	    if (len > 70)
++	      {
++		fprintf (stderr, ",\n\t");
++		len = 8;
++		comma = "";
++	      }
++
++	    if (rs6000_hard_regno_nregs[m][r] > 1)
++	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
++			     rs6000_hard_regno_nregs[m][r]);
++	    else
++	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
++
++	    comma = ", ";
++	  }
++
++      if (call_used_regs[r])
++	{
++	  if (len > 70)
++	    {
++	      fprintf (stderr, ",\n\t");
++	      len = 8;
++	      comma = "";
++	    }
++
++	  len += fprintf (stderr, "%s%s", comma, "call-used");
++	  comma = ", ";
++	}
++
++      if (fixed_regs[r])
++	{
++	  if (len > 70)
++	    {
++	      fprintf (stderr, ",\n\t");
++	      len = 8;
++	      comma = "";
++	    }
++
++	  len += fprintf (stderr, "%s%s", comma, "fixed");
++	  comma = ", ";
++	}
++
++      if (len > 70)
++	{
++	  fprintf (stderr, ",\n\t");
++	  comma = "";
++	}
++
++      fprintf (stderr, "%sregno = %d\n", comma, r);
++    }
++}
++
++/* Map enum rs6000_vector to string.  */
++static const char *
++rs6000_debug_vector_unit[] = {
++  "none",
++  "altivec",
++  "vsx",
++  "paired",
++  "spe",
++  "other"
++};
++
++/* Initialize the various global tables that are based on register size.  */
++static void
++rs6000_init_hard_regno_mode_ok (void)
++{
++  int r, m, c;
++  enum reg_class vsx_rc = (TARGET_ALTIVEC ? VSX_REGS : FLOAT_REGS);
++  bool float_p = (TARGET_HARD_FLOAT && TARGET_FPRS);
++
++  /* Precalculate vector information; this must be set up before
++     rs6000_hard_regno_nregs_internal below.  */
++  for (m = 0; m < NUM_MACHINE_MODES; ++m)
++    {
++      rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
++      rs6000_vector_reg_class[m] = NO_REGS;
++    }
++
++  /* TODO, add TI/V2DI mode for moving data if Altivec or VSX.  */
++
++  /* V2DF mode, VSX only.  */
++  if (float_p && TARGET_VSX && TARGET_VSX_VECTOR_DOUBLE)
++    {
++      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
++      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
++      rs6000_vector_align[V2DFmode] = 64;
++    }
++
++  /* V4SF mode, either VSX or Altivec.  */
++  if (float_p && TARGET_VSX && TARGET_VSX_VECTOR_FLOAT)
++    {
++      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
++      if (TARGET_VSX_VECTOR_MEMORY || !TARGET_ALTIVEC)
++	{
++	  rs6000_vector_align[V4SFmode] = 32;
++	  rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
++	}
++      else
++	{
++	  rs6000_vector_align[V4SFmode] = 128;
++	  rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
++	}
++    }
++  else if (float_p && TARGET_ALTIVEC)
++    {
++      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
++      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
++      rs6000_vector_align[V4SFmode] = 128;
++    }
++
++  /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
++     and stores.  */
++  if (TARGET_ALTIVEC)
++    {
++      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
++      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
++      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
++
++      rs6000_vector_reg_class[V16QImode] = ALTIVEC_REGS;
++      rs6000_vector_reg_class[V8HImode] = ALTIVEC_REGS;
++      rs6000_vector_reg_class[V4SImode] = ALTIVEC_REGS;
++
++      if (TARGET_VSX && TARGET_VSX_VECTOR_MEMORY)
++	{
++	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
++	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
++	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
++	  rs6000_vector_align[V4SImode] = 32;
++	  rs6000_vector_align[V8HImode] = 32;
++	  rs6000_vector_align[V16QImode] = 32;
++	}
++      else
++	{
++	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
++	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
++	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
++	  rs6000_vector_align[V4SImode] = 128;
++	  rs6000_vector_align[V8HImode] = 128;
++	  rs6000_vector_align[V16QImode] = 128;
++	}
++    }
++
++  /* DFmode, see if we want to use the VSX unit.  */
++  if (float_p && TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
++    {
++      rs6000_vector_unit[DFmode] = VECTOR_VSX;
++      rs6000_vector_align[DFmode] = 64;
++      rs6000_vector_mem[DFmode]
++	= (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
++    }
++
++  /* TODO, add SPE and paired floating point vector support.  */
++
++  /* Set the VSX register classes.  */
++  rs6000_vector_reg_class[V4SFmode]
++    = ((VECTOR_UNIT_VSX_P (V4SFmode) && VECTOR_MEM_VSX_P (V4SFmode))
++       ? vsx_rc
++       : (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
++	  ? ALTIVEC_REGS
++	  : NO_REGS));
++
++  rs6000_vector_reg_class[V2DFmode]
++    = (VECTOR_UNIT_VSX_P (V2DFmode) ? vsx_rc : NO_REGS);
++
++  rs6000_vector_reg_class[DFmode]
++    = ((!float_p || !VECTOR_UNIT_VSX_P (DFmode))
++       ? NO_REGS
++       : ((TARGET_VSX_SCALAR_MEMORY)
++	  ? vsx_rc
++	  : FLOAT_REGS));
++
++  rs6000_vsx_reg_class = (float_p && TARGET_VSX) ? vsx_rc : NO_REGS;
++
++  /* Precalculate HARD_REGNO_NREGS.  */
++  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
++    for (m = 0; m < NUM_MACHINE_MODES; ++m)
++      rs6000_hard_regno_nregs[m][r] = rs6000_hard_regno_nregs_internal (r, m);
++
++  /* Precalculate HARD_REGNO_MODE_OK.  */
+   for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+     for (m = 0; m < NUM_MACHINE_MODES; ++m)
+       if (rs6000_hard_regno_mode_ok (r, m))
+ 	rs6000_hard_regno_mode_ok_p[m][r] = true;
++
++  /* Precalculate CLASS_MAX_NREGS sizes.  */
++  for (c = 0; c < LIM_REG_CLASSES; ++c)
++    {
++      int reg_size;
++
++      if (TARGET_VSX && VSX_REG_CLASS_P (c))
++	reg_size = UNITS_PER_VSX_WORD;
++
++      else if (c == ALTIVEC_REGS)
++	reg_size = UNITS_PER_ALTIVEC_WORD;
++
++      else if (c == FLOAT_REGS)
++	reg_size = UNITS_PER_FP_WORD;
++
++      else
++	reg_size = UNITS_PER_WORD;
++
++      for (m = 0; m < NUM_MACHINE_MODES; ++m)
++	rs6000_class_max_nregs[m][c]
++	  = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
++    }
++
++  if (TARGET_E500_DOUBLE)
++    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
++
++  if (TARGET_DEBUG_REG)
++    {
++      const char *nl = (const char *)0;
++
++      fprintf (stderr, "Register information: (last virtual reg = %d)\n",
++	       LAST_VIRTUAL_REGISTER);
++      rs6000_debug_reg_print (0, 31, "gr");
++      rs6000_debug_reg_print (32, 63, "fp");
++      rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
++			      LAST_ALTIVEC_REGNO,
++			      "vs");
++      rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
++      rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
++      rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
++      rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
++      rs6000_debug_reg_print (XER_REGNO, XER_REGNO, "xer");
++      rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
++      rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
++      rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
++      rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
++
++      fprintf (stderr,
++	       "\n"
++	       "V16QI reg_class = %s\n"
++	       "V8HI  reg_class = %s\n"
++	       "V4SI  reg_class = %s\n"
++	       "V2DI  reg_class = %s\n"
++	       "V4SF  reg_class = %s\n"
++	       "V2DF  reg_class = %s\n"
++	       "DF    reg_class = %s\n"
++	       "vsx   reg_class = %s\n\n",
++	       reg_class_names[rs6000_vector_reg_class[V16QImode]],
++	       reg_class_names[rs6000_vector_reg_class[V8HImode]],
++	       reg_class_names[rs6000_vector_reg_class[V4SImode]],
++	       reg_class_names[rs6000_vector_reg_class[V2DImode]],
++	       reg_class_names[rs6000_vector_reg_class[V4SFmode]],
++	       reg_class_names[rs6000_vector_reg_class[V2DFmode]],
++	       reg_class_names[rs6000_vector_reg_class[DFmode]],
++	       reg_class_names[rs6000_vsx_reg_class]);
++
++      for (m = 0; m < NUM_MACHINE_MODES; ++m)
++	if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
++	  {
++	    nl = "\n";
++	    fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
++		     GET_MODE_NAME (m),
++		     rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
++		     rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
++	  }
++
++      if (nl)
++	fputs (nl, stderr);
++    }
+ }
+ 
+ #if TARGET_MACHO
+@@ -1482,12 +1870,15 @@ rs6000_override_options (const char *def
+ 	 {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ 	 {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ 	 {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+-	 {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
++	 {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
++	  | MASK_ISEL},
+ 	 /* 8548 has a dummy entry for now.  */
+-	 {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
++	 {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
++	  | MASK_ISEL},
+ 	 {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ 	 {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
+-	 {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
++	 {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT
++	  | MASK_ISEL},
+ 	 {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ 	 {"970", PROCESSOR_POWER4,
+ 	  POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+@@ -1520,9 +1911,10 @@ rs6000_override_options (const char *def
+ 	  POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ 	  | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
+ 	  | MASK_MFPGPR},
+-	 {"power7", PROCESSOR_POWER5,
++	 {"power7", PROCESSOR_POWER7,
+ 	  POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
+-	  | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
++	  | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP | MASK_POPCNTD
++	  | MASK_VSX},	/* Don't add MASK_ISEL by default */
+ 	 {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
+ 	 {"powerpc64", PROCESSOR_POWERPC64,
+ 	  POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+@@ -1549,7 +1941,8 @@ rs6000_override_options (const char *def
+     POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
+ 		     | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
+ 		     | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
+-		     | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
++		     | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
++		     | MASK_POPCNTD | MASK_VSX | MASK_ISEL)
+   };
+ 
+   set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
+@@ -1594,10 +1987,6 @@ rs6000_override_options (const char *def
+ 	}
+     }
+ 
+-  if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
+-      && !rs6000_explicit_options.isel)
+-    rs6000_isel = 1;
+-
+   if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
+       || rs6000_cpu == PROCESSOR_PPCE500MC)
+     {
+@@ -1642,15 +2031,47 @@ rs6000_override_options (const char *def
+ 	}
+     }
+ 
++  /* Add some warnings for VSX.  Enable -maltivec unless the user explicitly
++     used -mno-altivec.  */
++  if (TARGET_VSX)
++    {
++      const char *msg = NULL;
++      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
++	  || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
++	msg = "-mvsx requires hardware floating point";
++      else if (TARGET_PAIRED_FLOAT)
++	msg = "-mvsx and -mpaired are incompatible";
++      /* The hardware will allow VSX and little endian, but until we make sure
++	 things like vector select, etc. work don't allow VSX on little endian
++	 systems at this point.  */
++      else if (!BYTES_BIG_ENDIAN)
++	msg = "-mvsx used with little endian code";
++      else if (TARGET_AVOID_XFORM > 0)
++	msg = "-mvsx needs indexed addressing";
++
++      if (msg)
++	{
++	  warning (0, msg);
++	  target_flags &= ~MASK_VSX;
++	}
++      else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC) == 0)
++	target_flags |= MASK_ALTIVEC;
++    }
++
+   /* Set debug flags */
+   if (rs6000_debug_name)
+     {
+       if (! strcmp (rs6000_debug_name, "all"))
+-	rs6000_debug_stack = rs6000_debug_arg = 1;
++	rs6000_debug_stack = rs6000_debug_arg = rs6000_debug_reg
++	  = rs6000_debug_addr = 1;
+       else if (! strcmp (rs6000_debug_name, "stack"))
+ 	rs6000_debug_stack = 1;
+       else if (! strcmp (rs6000_debug_name, "arg"))
+ 	rs6000_debug_arg = 1;
++      else if (! strcmp (rs6000_debug_name, "reg"))
++	rs6000_debug_reg = 1;
++      else if (! strcmp (rs6000_debug_name, "addr"))
++	rs6000_debug_addr = 1;
+       else
+ 	error ("unknown -mdebug-%s switch", rs6000_debug_name);
+     }
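
The -mvsx sanity checks above must clear only the VSX bit of
target_flags when a conflict is diagnosed, hence the and-with-complement
form.  A standalone sketch of the idiom, with hypothetical mask values,
not part of the patch:

#include <stdio.h>

#define MASK_VSX     0x1   /* hypothetical bit assignments */
#define MASK_ALTIVEC 0x2

int
main (void)
{
  unsigned int flags = MASK_VSX | MASK_ALTIVEC;

  flags &= ~MASK_VSX;       /* clear VSX, keep every other option */
  printf ("altivec still set: %d\n", (flags & MASK_ALTIVEC) != 0);
  return 0;
}
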
+@@ -1741,8 +2162,8 @@ rs6000_override_options (const char *def
+ 	rs6000_spe = 0;
+       if (!rs6000_explicit_options.float_gprs)
+ 	rs6000_float_gprs = 0;
+-      if (!rs6000_explicit_options.isel)
+-	rs6000_isel = 0;
++      if (!(target_flags_explicit & MASK_ISEL))
++	target_flags &= ~MASK_ISEL;
+     }
+ 
+   /* Detect invalid option combinations with E500.  */
+@@ -1751,12 +2172,14 @@ rs6000_override_options (const char *def
+   rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
+ 			&& rs6000_cpu != PROCESSOR_POWER5
+                         && rs6000_cpu != PROCESSOR_POWER6
++                        && rs6000_cpu != PROCESSOR_POWER7
+ 			&& rs6000_cpu != PROCESSOR_CELL);
+   rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
+ 			 || rs6000_cpu == PROCESSOR_POWER5);
+   rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
+                                  || rs6000_cpu == PROCESSOR_POWER5
+-                                 || rs6000_cpu == PROCESSOR_POWER6);
++                                 || rs6000_cpu == PROCESSOR_POWER6
++                                 || rs6000_cpu == PROCESSOR_POWER7);
+ 
+   rs6000_sched_restricted_insns_priority
+     = (rs6000_sched_groups ? 1 : 0);
+@@ -1951,6 +2374,10 @@ rs6000_override_options (const char *def
+ 	rs6000_cost = &power6_cost;
+ 	break;
+ 
++      case PROCESSOR_POWER7:
++	rs6000_cost = &power7_cost;
++	break;
++
+       default:
+ 	gcc_unreachable ();
+       }
+@@ -2001,7 +2428,7 @@ rs6000_override_options (const char *def
+ static tree
+ rs6000_builtin_mask_for_load (void)
+ {
+-  if (TARGET_ALTIVEC)
++  if (TARGET_ALTIVEC && !TARGET_VSX)
+     return altivec_builtin_mask_for_load;
+   else
+     return 0;
+@@ -2015,18 +2442,19 @@ rs6000_builtin_mask_for_load (void)
+ static tree
+ rs6000_builtin_conversion (enum tree_code code, tree type)
+ {
+-  if (!TARGET_ALTIVEC)
+-    return NULL_TREE;
+-
+   switch (code)
+     {
+     case FIX_TRUNC_EXPR:
+       switch (TYPE_MODE (type))
+ 	{
+ 	case V4SImode:
++	  if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
++	    return NULL_TREE;
++
+ 	  return TYPE_UNSIGNED (type)
+-	    ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTUXS]
+-	    : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTSXS];
++	    ? rs6000_builtin_decls[VECTOR_BUILTIN_FIXUNS_V4SF_V4SI]
++	    : rs6000_builtin_decls[VECTOR_BUILTIN_FIX_V4SF_V4SI];
++
+ 	default:
+ 	  return NULL_TREE;
+ 	}
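
The V4SImode cases above route float/fix conversions through
unit-agnostic builtins so that either AltiVec or VSX can provide the
instruction.  As a plain-C model of what one such lane-wise conversion
does (illustrative only; AltiVec's vctsxs additionally saturates, which
is ignored here):

/* Element-wise model of a V4SF -> V4SI fixed conversion.  */
static void
fix_trunc_v4sf_v4si (const float a[4], int b[4])
{
  int i;
  for (i = 0; i < 4; i++)
    b[i] = (int) a[i];          /* truncate toward zero, per lane */
}
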
+@@ -2035,9 +2463,12 @@ rs6000_builtin_conversion (enum tree_cod
+       switch (TYPE_MODE (type))
+ 	{
+ 	case V4SImode:
++	  if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
++	    return NULL_TREE;
++
+ 	  return TYPE_UNSIGNED (type)
+-	    ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX]
+-	    : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
++	    ? rs6000_builtin_decls[VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF]
++	    : rs6000_builtin_decls[VECTOR_BUILTIN_FLOAT_V4SI_V4SF];
+ 	default:
+ 	  return NULL_TREE;
+ 	}
+@@ -2150,6 +2581,14 @@ rs6000_builtin_vec_perm (tree type, tree
+       d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
+       break;
+ 
++    case V2DFmode:
++      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_2DF];
++      break;
++
++    case V2DImode:
++      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_2DI];
++      break;
++
+     default:
+       return NULL_TREE;
+     }
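
For the two VPERM builtins added above: vperm treats its two vector
inputs as one 32-byte table and each selector byte picks one entry,
which is why a V2DF/V2DI permute can reuse the byte-level machinery.
A plain-C model, illustrative and not part of the patch:

/* Byte-level model of vperm: out[i] = {a,b}[sel[i] & 31].  */
static void
vperm (const unsigned char a[16], const unsigned char b[16],
       const unsigned char sel[16], unsigned char out[16])
{
  int i;
  for (i = 0; i < 16; i++)
    {
      int k = sel[i] & 31;      /* only the low 5 bits select */
      out[i] = (k < 16) ? a[k] : b[k - 16];
    }
}
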
+@@ -2229,6 +2668,7 @@ static bool
+ rs6000_handle_option (size_t code, const char *arg, int value)
+ {
+   enum fpu_type_t fpu_type = FPU_NONE;
++  int isel;
+ 
+   switch (code)
+     {
+@@ -2331,14 +2771,14 @@ rs6000_handle_option (size_t code, const
+       rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
+       break;
+ 
+-    case OPT_misel:
+-      rs6000_explicit_options.isel = true;
+-      rs6000_isel = value;
+-      break;
+-
+     case OPT_misel_:
+-      rs6000_explicit_options.isel = true;
+-      rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
++      target_flags_explicit |= MASK_ISEL;
++      isel = 0;
++      rs6000_parse_yes_no_option ("isel", arg, &isel);
++      if (isel)
++	target_flags |= MASK_ISEL;
++      else
++	target_flags &= ~MASK_ISEL;
+       break;
+ 
+     case OPT_mspe:
+@@ -2967,6 +3407,9 @@ output_vec_const_move (rtx *operands)
+   vec = operands[1];
+   mode = GET_MODE (dest);
+ 
++  if (TARGET_VSX && zero_constant (vec, mode))
++    return "xxlxor %x0,%x0,%x0";
++
+   if (TARGET_ALTIVEC)
+     {
+       rtx splat_vec;
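
On the new xxlxor case above: XOR-ing any register with itself yields
zero, so a zero vector never needs a constant-pool load.  The same
idiom in scalar C, for illustration:

#include <stdio.h>

int
main (void)
{
  unsigned long long r = 0xdeadbeefULL;   /* arbitrary old contents */
  r ^= r;                                 /* now exactly 0 */
  printf ("%llu\n", r);                   /* prints 0 */
  return 0;
}
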
+@@ -3190,20 +3633,21 @@ rs6000_expand_vector_init (rtx target, r
+   if (n_var == 0)
+     {
+       rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
+-      if (mode != V4SFmode && all_const_zero)
++      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
++      if ((int_vector_p || TARGET_VSX) && all_const_zero)
+ 	{
+ 	  /* Zero register.  */
+ 	  emit_insn (gen_rtx_SET (VOIDmode, target,
+ 				  gen_rtx_XOR (mode, target, target)));
+ 	  return;
+ 	}
+-      else if (mode != V4SFmode && easy_vector_constant (const_vec, mode))
++      else if (int_vector_p && easy_vector_constant (const_vec, mode))
+ 	{
+ 	  /* Splat immediate.  */
+ 	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
+ 	  return;
+ 	}
+-      else if (all_same)
++      else if (all_same && int_vector_p)
+ 	;	/* Splat vector element.  */
+       else
+ 	{
+@@ -3213,6 +3657,18 @@ rs6000_expand_vector_init (rtx target, r
+ 	}
+     }
+ 
++  if (mode == V2DFmode)
++    {
++      gcc_assert (TARGET_VSX);
++      if (all_same)
++	emit_insn (gen_vsx_splat_v2df (target, XVECEXP (vals, 0, 0)));
++      else
++	emit_insn (gen_vsx_concat_v2df (target,
++					copy_to_reg (XVECEXP (vals, 0, 0)),
++					copy_to_reg (XVECEXP (vals, 0, 1))));
++      return;
++    }
++
+   /* Store value to stack temp.  Load vector element.  Splat.  */
+   if (all_same)
+     {
+@@ -3272,6 +3728,13 @@ rs6000_expand_vector_set (rtx target, rt
+   int width = GET_MODE_SIZE (inner_mode);
+   int i;
+ 
++  if (mode == V2DFmode)
++    {
++      gcc_assert (TARGET_VSX);
++      emit_insn (gen_vsx_set_v2df (target, target, val, GEN_INT (elt)));
++      return;
++    }
++
+   /* Load single variable value.  */
+   mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+   emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
+@@ -3309,6 +3772,13 @@ rs6000_expand_vector_extract (rtx target
+   enum machine_mode inner_mode = GET_MODE_INNER (mode);
+   rtx mem, x;
+ 
++  if (mode == V2DFmode)
++    {
++      gcc_assert (TARGET_VSX);
++      emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
++      return;
++    }
++
+   /* Allocate mode-sized buffer.  */
+   mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ 
+@@ -3627,9 +4097,13 @@ rs6000_legitimate_offset_address_p (enum
+     case V8HImode:
+     case V4SFmode:
+     case V4SImode:
+-      /* AltiVec vector modes.  Only reg+reg addressing is valid and
++    case V2DFmode:
++    case V2DImode:
++      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid and
+ 	 constant offset zero should not occur due to canonicalization.  */
+-      return false;
++      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode))
++	return false;
++      break;
+ 
+     case V4HImode:
+     case V2SImode:
+@@ -3646,6 +4120,11 @@ rs6000_legitimate_offset_address_p (enum
+       if (TARGET_E500_DOUBLE)
+ 	return SPE_CONST_OFFSET_OK (offset);
+ 
++      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
++	 addressing.  */
++      if (VECTOR_MEM_VSX_P (DFmode))
++	return false;
++
+     case DDmode:
+     case DImode:
+       /* On e500v2, we may have:
+@@ -3716,7 +4195,9 @@ avoiding_indexed_address_p (enum machine
+ {
+   /* Avoid indexed addressing for modes that have non-indexed
+      load/store instruction forms.  */
+-  return TARGET_AVOID_XFORM && !ALTIVEC_VECTOR_MODE (mode);
++  return (TARGET_AVOID_XFORM
++	  && (!TARGET_ALTIVEC || !ALTIVEC_VECTOR_MODE (mode))
++	  && (!TARGET_VSX || !VSX_VECTOR_MODE (mode)));
+ }
+ 
+ inline bool
+@@ -3808,25 +4289,30 @@ rtx
+ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ 			   enum machine_mode mode)
+ {
++  rtx ret = NULL_RTX;
++  rtx orig_x = x;
++
+   if (GET_CODE (x) == SYMBOL_REF)
+     {
+       enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
+       if (model != 0)
+-	return rs6000_legitimize_tls_address (x, model);
++	ret = rs6000_legitimize_tls_address (x, model);
+     }
+ 
+-  if (GET_CODE (x) == PLUS
+-      && GET_CODE (XEXP (x, 0)) == REG
+-      && GET_CODE (XEXP (x, 1)) == CONST_INT
+-      && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000
+-      && !((TARGET_POWERPC64
+-	    && (mode == DImode || mode == TImode)
+-	    && (INTVAL (XEXP (x, 1)) & 3) != 0)
+-	   || SPE_VECTOR_MODE (mode)
+-	   || ALTIVEC_VECTOR_MODE (mode)
+-	   || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+-				      || mode == DImode || mode == DDmode
+-				      || mode == TDmode))))
++  else if (GET_CODE (x) == PLUS
++	   && GET_CODE (XEXP (x, 0)) == REG
++	   && GET_CODE (XEXP (x, 1)) == CONST_INT
++	   && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
++	       >= 0x10000)
++	   && !((TARGET_POWERPC64
++		 && (mode == DImode || mode == TImode)
++		 && (INTVAL (XEXP (x, 1)) & 3) != 0)
++		|| (TARGET_SPE && SPE_VECTOR_MODE (mode))
++		|| (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
++		|| (TARGET_VSX && VSX_VECTOR_MODE (mode))
++		|| (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
++					   || mode == DImode || mode == DDmode
++					   || mode == TDmode))))
+     {
+       HOST_WIDE_INT high_int, low_int;
+       rtx sum;
+@@ -3834,7 +4320,7 @@ rs6000_legitimize_address (rtx x, rtx ol
+       high_int = INTVAL (XEXP (x, 1)) - low_int;
+       sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ 					 GEN_INT (high_int)), 0);
+-      return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
++      ret = gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
+     }
+   else if (GET_CODE (x) == PLUS
+ 	   && GET_CODE (XEXP (x, 0)) == REG
+@@ -3850,32 +4336,29 @@ rs6000_legitimize_address (rtx x, rtx ol
+ 	   && mode != TFmode
+ 	   && mode != TDmode)
+     {
+-      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
+-			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
++      ret = gen_rtx_PLUS (Pmode, XEXP (x, 0),
++			  force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
+     }
+-  else if (ALTIVEC_VECTOR_MODE (mode))
++  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
+     {
+-      rtx reg;
+-
+       /* Make sure both operands are registers.  */
+       if (GET_CODE (x) == PLUS)
+-	return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
+-			     force_reg (Pmode, XEXP (x, 1)));
+-
+-      reg = force_reg (Pmode, x);
+-      return reg;
++	ret = gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
++			    force_reg (Pmode, XEXP (x, 1)));
++      else
++	ret = force_reg (Pmode, x);
+     }
+-  else if (SPE_VECTOR_MODE (mode)
++  else if ((TARGET_SPE && SPE_VECTOR_MODE (mode))
+ 	   || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ 				      || mode == DDmode || mode == TDmode
+ 				      || mode == DImode)))
+     {
+       if (mode == DImode)
+-	return NULL_RTX;
+-      /* We accept [reg + reg] and [reg + OFFSET].  */
++	ret = NULL_RTX;
+ 
+-      if (GET_CODE (x) == PLUS)
+-       {
++      /* We accept [reg + reg] and [reg + OFFSET].  */
++      else if (GET_CODE (x) == PLUS)
++	{
+          rtx op1 = XEXP (x, 0);
+          rtx op2 = XEXP (x, 1);
+          rtx y;
+@@ -3894,12 +4377,12 @@ rs6000_legitimize_address (rtx x, rtx ol
+          y = gen_rtx_PLUS (Pmode, op1, op2);
+ 
+          if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
+-           return force_reg (Pmode, y);
++           ret = force_reg (Pmode, y);
+          else
+-           return y;
++           ret = y;
+        }
+-
+-      return force_reg (Pmode, x);
++      else
++	ret = force_reg (Pmode, x);
+     }
+   else if (TARGET_ELF
+ 	   && TARGET_32BIT
+@@ -3915,7 +4398,7 @@ rs6000_legitimize_address (rtx x, rtx ol
+     {
+       rtx reg = gen_reg_rtx (Pmode);
+       emit_insn (gen_elf_high (reg, x));
+-      return gen_rtx_LO_SUM (Pmode, reg, x);
++      ret = gen_rtx_LO_SUM (Pmode, reg, x);
+     }
+   else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
+ 	   && ! flag_pic
+@@ -3933,17 +4416,34 @@ rs6000_legitimize_address (rtx x, rtx ol
+     {
+       rtx reg = gen_reg_rtx (Pmode);
+       emit_insn (gen_macho_high (reg, x));
+-      return gen_rtx_LO_SUM (Pmode, reg, x);
++      ret = gen_rtx_LO_SUM (Pmode, reg, x);
+     }
+   else if (TARGET_TOC
+ 	   && GET_CODE (x) == SYMBOL_REF
+ 	   && constant_pool_expr_p (x)
+ 	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
+     {
+-      return create_TOC_reference (x);
++      ret = create_TOC_reference (x);
+     }
+   else
+-    return NULL_RTX;
++    ret = NULL_RTX;
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "\nrs6000_legitimize_address: mode %s, original addr:\n",
++	       GET_MODE_NAME (mode));
++      debug_rtx (orig_x);
++      if (ret)
++	{
++	  fprintf (stderr, "New addr:\n");
++	  debug_rtx (ret);
++	}
++      else
++	fprintf (stderr, "NULL returned\n");
++    }
++
++  return ret;
+ }
+ 
+ /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
+@@ -4232,6 +4732,9 @@ rs6000_legitimize_reload_address (rtx x,
+ 				  int opnum, int type,
+ 				  int ind_levels ATTRIBUTE_UNUSED, int *win)
+ {
++  rtx orig_x = x;
++  rtx ret = NULL_RTX;
++
+   /* We must recognize output that we have already generated ourselves.  */
+   if (GET_CODE (x) == PLUS
+       && GET_CODE (XEXP (x, 0)) == PLUS
+@@ -4243,17 +4746,17 @@ rs6000_legitimize_reload_address (rtx x,
+ 		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ 		   opnum, (enum reload_type)type);
+       *win = 1;
+-      return x;
++      ret = x;
+     }
+ 
+ #if TARGET_MACHO
+-  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
+-      && GET_CODE (x) == LO_SUM
+-      && GET_CODE (XEXP (x, 0)) == PLUS
+-      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
+-      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
+-      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
+-      && machopic_operand_p (XEXP (x, 1)))
++  else if (DEFAULT_ABI == ABI_DARWIN && flag_pic
++	   && GET_CODE (x) == LO_SUM
++	   && GET_CODE (XEXP (x, 0)) == PLUS
++	   && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
++	   && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
++	   && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
++	   && machopic_operand_p (XEXP (x, 1)))
+     {
+       /* Result of previous invocation of this function on Darwin
+ 	 floating point constant.  */
+@@ -4261,40 +4764,42 @@ rs6000_legitimize_reload_address (rtx x,
+ 		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ 		   opnum, (enum reload_type)type);
+       *win = 1;
+-      return x;
++      ret = x;
+     }
+ #endif
+ 
+   /* Force ld/std non-word aligned offset into base register by wrapping
+      in offset 0.  */
+-  if (GET_CODE (x) == PLUS
+-      && GET_CODE (XEXP (x, 0)) == REG
+-      && REGNO (XEXP (x, 0)) < 32
+-      && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+-      && GET_CODE (XEXP (x, 1)) == CONST_INT
+-      && (INTVAL (XEXP (x, 1)) & 3) != 0
+-      && !ALTIVEC_VECTOR_MODE (mode)
+-      && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
+-      && TARGET_POWERPC64)
++  else if (GET_CODE (x) == PLUS
++	   && GET_CODE (XEXP (x, 0)) == REG
++	   && REGNO (XEXP (x, 0)) < 32
++	   && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
++	   && GET_CODE (XEXP (x, 1)) == CONST_INT
++	   && (INTVAL (XEXP (x, 1)) & 3) != 0
++	   && !ALTIVEC_VECTOR_MODE (mode)
++	   && !VSX_VECTOR_MODE (mode)
++	   && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
++	   && TARGET_POWERPC64)
+     {
+       x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
+       push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ 		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ 		   opnum, (enum reload_type) type);
+       *win = 1;
+-      return x;
++      ret = x;
+     }
+ 
+-  if (GET_CODE (x) == PLUS
+-      && GET_CODE (XEXP (x, 0)) == REG
+-      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+-      && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+-      && GET_CODE (XEXP (x, 1)) == CONST_INT
+-      && !SPE_VECTOR_MODE (mode)
+-      && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+-				  || mode == DDmode || mode == TDmode
+-				  || mode == DImode))
+-      && !ALTIVEC_VECTOR_MODE (mode))
++  else if (GET_CODE (x) == PLUS
++	   && GET_CODE (XEXP (x, 0)) == REG
++	   && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
++	   && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
++	   && GET_CODE (XEXP (x, 1)) == CONST_INT
++	   && !SPE_VECTOR_MODE (mode)
++	   && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
++				       || mode == DDmode || mode == TDmode
++				       || mode == DImode))
++	   && !ALTIVEC_VECTOR_MODE (mode)
++	   && !VSX_VECTOR_MODE (mode))
+     {
+       HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+       HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+@@ -4305,42 +4810,44 @@ rs6000_legitimize_reload_address (rtx x,
+       if (high + low != val)
+ 	{
+ 	  *win = 0;
+-	  return x;
++	  ret = x;
+ 	}
++      else
++	{
++	  /* Reload the high part into a base reg; leave the low part
++	     in the mem directly.  */
+ 
+-      /* Reload the high part into a base reg; leave the low part
+-	 in the mem directly.  */
+-
+-      x = gen_rtx_PLUS (GET_MODE (x),
+-			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+-				      GEN_INT (high)),
+-			GEN_INT (low));
++	  x = gen_rtx_PLUS (GET_MODE (x),
++			    gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
++					  GEN_INT (high)),
++			    GEN_INT (low));
+ 
+-      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+-		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+-		   opnum, (enum reload_type)type);
+-      *win = 1;
+-      return x;
++	  push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
++		       BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
++		       opnum, (enum reload_type)type);
++	  *win = 1;
++	  ret = x;
++	}
+     }
+ 
+-  if (GET_CODE (x) == SYMBOL_REF
+-      && !ALTIVEC_VECTOR_MODE (mode)
+-      && !SPE_VECTOR_MODE (mode)
++  else if (GET_CODE (x) == SYMBOL_REF
++	   && VECTOR_MEM_NONE_P (mode)
++	   && (!TARGET_SPE || !SPE_VECTOR_MODE (mode))
+ #if TARGET_MACHO
+-      && DEFAULT_ABI == ABI_DARWIN
+-      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
++	   && DEFAULT_ABI == ABI_DARWIN
++	   && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ #else
+-      && DEFAULT_ABI == ABI_V4
+-      && !flag_pic
++	   && DEFAULT_ABI == ABI_V4
++	   && !flag_pic
+ #endif
+-      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
+-	 The same goes for DImode without 64-bit gprs and DFmode and DDmode
+-	 without fprs.  */
+-      && mode != TFmode
+-      && mode != TDmode
+-      && (mode != DImode || TARGET_POWERPC64)
+-      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
+-	  || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
++	   /* Don't do this for TFmode or TDmode, since the result isn't
++	      offsettable.  The same goes for DImode without 64-bit gprs and
++	      DFmode and DDmode without fprs.  */
++	   && mode != TFmode
++	   && mode != TDmode
++	   && (mode != DImode || TARGET_POWERPC64)
++	   && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
++	       || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
+     {
+ #if TARGET_MACHO
+       if (flag_pic)
+@@ -4359,37 +4866,61 @@ rs6000_legitimize_reload_address (rtx x,
+ 		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ 		   opnum, (enum reload_type)type);
+       *win = 1;
+-      return x;
++      ret = x;
+     }
+ 
+   /* Reload an offset address wrapped by an AND that represents the
+      masking of the lower bits.  Strip the outer AND and let reload
+      convert the offset address into an indirect address.  */
+-  if (TARGET_ALTIVEC
+-      && ALTIVEC_VECTOR_MODE (mode)
+-      && GET_CODE (x) == AND
+-      && GET_CODE (XEXP (x, 0)) == PLUS
+-      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+-      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+-      && GET_CODE (XEXP (x, 1)) == CONST_INT
+-      && INTVAL (XEXP (x, 1)) == -16)
++  else if (VECTOR_MEM_ALTIVEC_P (mode)
++	   && GET_CODE (x) == AND
++	   && GET_CODE (XEXP (x, 0)) == PLUS
++	   && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
++	   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
++	   && GET_CODE (XEXP (x, 1)) == CONST_INT
++	   && INTVAL (XEXP (x, 1)) == -16)
+     {
+       x = XEXP (x, 0);
+       *win = 1;
+-      return x;
++      ret = x;
+     }
+ 
+-  if (TARGET_TOC
+-      && GET_CODE (x) == SYMBOL_REF
+-      && constant_pool_expr_p (x)
+-      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
++  else if (TARGET_TOC
++	   && GET_CODE (x) == SYMBOL_REF
++	   && constant_pool_expr_p (x)
++	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+     {
+       x = create_TOC_reference (x);
+       *win = 1;
+-      return x;
++      ret = x;
++    }
++
++  else
++    {
++      *win = 0;
++      ret = x;
+     }
+-  *win = 0;
+-  return x;
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
++	       "type = %d, ind_levels = %d, win = %d, original addr:\n",
++	       GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
++      debug_rtx (orig_x);
++
++      if (orig_x == ret)
++	fprintf (stderr, "Same address returned\n");
++      else if (!ret)
++	fprintf (stderr, "NULL returned\n");
++      else
++	{
++	  fprintf (stderr, "New address:\n");
++	  debug_rtx (ret);
++	}
++    }
++
++  return ret;
+ }
+ 
+ /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
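
The reload path above splits an out-of-range offset with the idiom
low = ((val & 0xffff) ^ 0x8000) - 0x8000, i.e. the low 16 bits
sign-extended, so that high = val - low is a multiple of 0x10000 that
the high-part add can supply.  A standalone check, not part of the
patch:

#include <stdio.h>

int
main (void)
{
  long val = 0x12345;
  long low = ((val & 0xffff) ^ 0x8000) - 0x8000;  /* 0x2345 */
  long high = val - low;                          /* 0x10000 */

  printf ("high=%#lx low=%#lx ok=%d\n", high, low, high + low == val);
  return 0;
}
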
+@@ -4412,77 +4943,101 @@ rs6000_legitimize_reload_address (rtx x,
+ int
+ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
+ {
++  int ret;
++  rtx orig_x = x;
++
+   /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
+   if (TARGET_ALTIVEC
+-      && ALTIVEC_VECTOR_MODE (mode)
++      && (ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+       && GET_CODE (x) == AND
+       && GET_CODE (XEXP (x, 1)) == CONST_INT
+       && INTVAL (XEXP (x, 1)) == -16)
+     x = XEXP (x, 0);
+ 
+   if (RS6000_SYMBOL_REF_TLS_P (x))
+-    return 0;
+-  if (legitimate_indirect_address_p (x, reg_ok_strict))
+-    return 1;
+-  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
+-      && !ALTIVEC_VECTOR_MODE (mode)
+-      && !SPE_VECTOR_MODE (mode)
+-      && mode != TFmode
+-      && mode != TDmode
+-      /* Restrict addressing for DI because of our SUBREG hackery.  */
+-      && !(TARGET_E500_DOUBLE
+-	   && (mode == DFmode || mode == DDmode || mode == DImode))
+-      && TARGET_UPDATE
+-      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
+-    return 1;
+-  if (legitimate_small_data_p (mode, x))
+-    return 1;
+-  if (legitimate_constant_pool_address_p (x))
+-    return 1;
++    ret = 0;
++  else if (legitimate_indirect_address_p (x, reg_ok_strict))
++    ret = 1;
++  else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
++	   && !VECTOR_MEM_ALTIVEC_P (mode)
++	   /* && !VECTOR_MEM_VSX_P (mode) */
++	   && (!TARGET_SPE || !SPE_VECTOR_MODE (mode))
++	   && mode != TFmode
++	   && mode != TDmode
++	   /* Restrict addressing for DI because of our SUBREG hackery.  */
++	   && !(TARGET_E500_DOUBLE
++		&& (mode == DFmode || mode == DDmode || mode == DImode))
++	   && TARGET_UPDATE
++	   && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
++    ret = 1;
++  else if (legitimate_small_data_p (mode, x))
++    ret = 1;
++  else if (legitimate_constant_pool_address_p (x))
++    ret = 1;
+   /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
+-  if (! reg_ok_strict
+-      && GET_CODE (x) == PLUS
+-      && GET_CODE (XEXP (x, 0)) == REG
+-      && (XEXP (x, 0) == virtual_stack_vars_rtx
+-	  || XEXP (x, 0) == arg_pointer_rtx)
+-      && GET_CODE (XEXP (x, 1)) == CONST_INT)
+-    return 1;
+-  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
+-    return 1;
+-  if (mode != TImode
+-      && mode != TFmode
+-      && mode != TDmode
+-      && ((TARGET_HARD_FLOAT && TARGET_FPRS)
+-	  || TARGET_POWERPC64
+-	  || (mode != DFmode && mode != DDmode)
+-	  || (TARGET_E500_DOUBLE && mode != DDmode))
+-      && (TARGET_POWERPC64 || mode != DImode)
+-      && !avoiding_indexed_address_p (mode)
+-      && legitimate_indexed_address_p (x, reg_ok_strict))
+-    return 1;
+-  if (GET_CODE (x) == PRE_MODIFY
+-      && mode != TImode
+-      && mode != TFmode
+-      && mode != TDmode
+-      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+-	  || TARGET_POWERPC64
+-	  || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
+-      && (TARGET_POWERPC64 || mode != DImode)
+-      && !ALTIVEC_VECTOR_MODE (mode)
+-      && !SPE_VECTOR_MODE (mode)
+-      /* Restrict addressing for DI because of our SUBREG hackery.  */
+-      && !(TARGET_E500_DOUBLE
+-	   && (mode == DFmode || mode == DDmode || mode == DImode))
+-      && TARGET_UPDATE
+-      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
+-      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
+-	  || (!avoiding_indexed_address_p (mode)
+-	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
+-      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+-    return 1;
+-  if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
+-    return 1;
+-  return 0;
++  else if (! reg_ok_strict
++	   && GET_CODE (x) == PLUS
++	   && GET_CODE (XEXP (x, 0)) == REG
++	   && (XEXP (x, 0) == virtual_stack_vars_rtx
++	       || XEXP (x, 0) == arg_pointer_rtx)
++	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
++    ret = 1;
++  else if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
++    ret = 1;
++  else if (mode != TImode
++	   && mode != TFmode
++	   && mode != TDmode
++	   && ((TARGET_HARD_FLOAT && TARGET_FPRS)
++	       || TARGET_POWERPC64
++	       || (mode != DFmode && mode != DDmode)
++	       || (TARGET_E500_DOUBLE && mode != DDmode))
++	   && (TARGET_POWERPC64 || mode != DImode)
++	   && !avoiding_indexed_address_p (mode)
++	   && legitimate_indexed_address_p (x, reg_ok_strict))
++    ret = 1;
++  else if (GET_CODE (x) == PRE_MODIFY
++	   && VECTOR_MEM_VSX_P (mode)
++	   && TARGET_UPDATE
++	   && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)
++	   && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
++    ret = 1;
++  else if (GET_CODE (x) == PRE_MODIFY
++	   && mode != TImode
++	   && mode != TFmode
++	   && mode != TDmode
++	   && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
++	       || TARGET_POWERPC64
++	       || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
++	   && (TARGET_POWERPC64 || mode != DImode)
++	   && !VECTOR_MEM_ALTIVEC_P (mode)
++	   && (!TARGET_SPE || !SPE_VECTOR_MODE (mode))
++	   /* Restrict addressing for DI because of our SUBREG hackery.  */
++	   && !(TARGET_E500_DOUBLE
++		&& (mode == DFmode || mode == DDmode || mode == DImode))
++	   && TARGET_UPDATE
++	   && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
++	   && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
++	       || (!avoiding_indexed_address_p (mode)
++		   && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
++	   && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
++    ret = 1;
++  else if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
++    ret = 1;
++  else
++    ret = 0;
++
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "\nrs6000_legitimate_address: return = %d, mode = %s, "
++	       "strict = %d\n",
++	       ret,
++	       GET_MODE_NAME (mode),
++	       reg_ok_strict);
++      debug_rtx (orig_x);
++    }
++
++  return ret;
+ }
+ 
+ /* Go to LABEL if ADDR (a legitimate address expression)
+@@ -4499,28 +5054,40 @@ rs6000_legitimate_address (enum machine_
+ bool
+ rs6000_mode_dependent_address (rtx addr)
+ {
++  bool ret = false;
++
+   switch (GET_CODE (addr))
+     {
+     case PLUS:
+       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ 	{
+ 	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
+-	  return val + 12 + 0x8000 >= 0x10000;
++	  ret = (val + 12 + 0x8000 >= 0x10000);
+ 	}
+       break;
+ 
+     case LO_SUM:
+-      return true;
++      ret = true;
++      break;
+ 
+     /* Auto-increment cases are now treated generically in recog.c.  */
+     case PRE_MODIFY:
+-      return TARGET_UPDATE;
++      ret = (TARGET_UPDATE != 0);
++      break;
+ 
+     default:
+       break;
+     }
+ 
+-  return false;
++  if (TARGET_DEBUG_ADDR)
++    {
++      fprintf (stderr,
++	       "\nrs6000_mode_dependent_address: ret = %d\n",
++	       (int)ret);
++      debug_rtx (addr);
++    }
++
++  return ret;
+ }
+ 
+ /* Implement FIND_BASE_TERM.  */
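
Aside on the PLUS case of rs6000_mode_dependent_address above: a D-form
displacement is a signed 16-bit field, and a multi-word access can
reach up to 12 bytes past the base offset, so the unsigned-compare
idiom val + 12 + 0x8000 >= 0x10000 tests whether the far end falls out
of range.  Standalone, not part of the patch:

#include <stdio.h>

/* 1 iff d fits a signed 16-bit D-field: -0x8000 <= d <= 0x7fff.  */
static int
fits_dform (long d)
{
  return (unsigned long) (d + 0x8000) < 0x10000;
}

int
main (void)
{
  long val = 0x7ff8;            /* in range itself ...  */
  printf ("%d %d\n", fits_dform (val), fits_dform (val + 12));
  /* Prints "1 0": the word at val+12 would not fit, which is what
     makes the address mode-dependent.  */
  return 0;
}
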
+@@ -4571,43 +5138,6 @@ rs6000_offsettable_memref_p (rtx op)
+   return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
+ }
+ 
+-/* Return number of consecutive hard regs needed starting at reg REGNO
+-   to hold something of mode MODE.
+-   This is ordinarily the length in words of a value of mode MODE
+-   but can be less for certain modes in special long registers.
+-
+-   For the SPE, GPRs are 64 bits but only 32 bits are visible in
+-   scalar instructions.  The upper 32 bits are only available to the
+-   SIMD instructions.
+-
+-   POWER and PowerPC GPRs hold 32 bits worth;
+-   PowerPC64 GPRs and FPRs point register holds 64 bits worth.  */
+-
+-int
+-rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
+-{
+-  if (FP_REGNO_P (regno))
+-    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+-
+-  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+-    return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
+-
+-  if (ALTIVEC_REGNO_P (regno))
+-    return
+-      (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;
+-
+-  /* The value returned for SCmode in the E500 double case is 2 for
+-     ABI compatibility; storing an SCmode value in a single register
+-     would require function_arg and rs6000_spe_function_arg to handle
+-     SCmode so as to pass the value correctly in a pair of
+-     registers.  */
+-  if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
+-      && !DECIMAL_FLOAT_MODE_P (mode))
+-    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+-
+-  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+-}
+-
+ /* Change register usage conditional on target flags.  */
+ void
+ rs6000_conditional_register_usage (void)
+@@ -4672,14 +5202,14 @@ rs6000_conditional_register_usage (void)
+ 	= call_really_used_regs[14] = 1;
+     }
+ 
+-  if (!TARGET_ALTIVEC)
++  if (!TARGET_ALTIVEC && !TARGET_VSX)
+     {
+       for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ 	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
+       call_really_used_regs[VRSAVE_REGNO] = 1;
+     }
+ 
+-  if (TARGET_ALTIVEC)
++  if (TARGET_ALTIVEC || TARGET_VSX)
+     global_regs[VSCR_REGNO] = 1;
+ 
+   if (TARGET_ALTIVEC_ABI)
+@@ -5101,6 +5631,8 @@ rs6000_emit_move (rtx dest, rtx source, 
+     case V2SFmode:
+     case V2SImode:
+     case V1DImode:
++    case V2DFmode:
++    case V2DImode:
+       if (CONSTANT_P (operands[1])
+ 	  && !easy_vector_constant (operands[1], mode))
+ 	operands[1] = force_const_mem (mode, operands[1]);
+@@ -5270,6 +5802,9 @@ rs6000_emit_move (rtx dest, rtx source, 
+       break;
+ 
+     case TImode:
++      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (TImode))
++	break;
++
+       rs6000_eliminate_indexed_memrefs (operands);
+ 
+       if (TARGET_POWER)
+@@ -5285,7 +5820,7 @@ rs6000_emit_move (rtx dest, rtx source, 
+       break;
+ 
+     default:
+-      gcc_unreachable ();
++      fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
+     }
+ 
+   /* Above, we may have called force_const_mem which may have returned
+@@ -5305,10 +5840,10 @@ rs6000_emit_move (rtx dest, rtx source, 
+    && TARGET_HARD_FLOAT && TARGET_FPRS)
+ 
+ /* Nonzero if we can use an AltiVec register to pass this arg.  */
+-#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
+-  (ALTIVEC_VECTOR_MODE (MODE)				\
+-   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
+-   && TARGET_ALTIVEC_ABI				\
++#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)		\
++  ((ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE))	\
++   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG			\
++   && TARGET_ALTIVEC_ABI					\
+    && (NAMED))
+ 
+ /* Return a nonzero value to say to return the function value in
+@@ -5549,7 +6084,7 @@ function_arg_boundary (enum machine_mode
+ 	       && int_size_in_bytes (type) >= 8
+ 	       && int_size_in_bytes (type) < 16))
+     return 64;
+-  else if (ALTIVEC_VECTOR_MODE (mode)
++  else if ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+ 	   || (type && TREE_CODE (type) == VECTOR_TYPE
+ 	       && int_size_in_bytes (type) >= 16))
+     return 128;
+@@ -5694,7 +6229,7 @@ function_arg_advance (CUMULATIVE_ARGS *c
+     cum->nargs_prototype--;
+ 
+   if (TARGET_ALTIVEC_ABI
+-      && (ALTIVEC_VECTOR_MODE (mode)
++      && ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+ 	  || (type && TREE_CODE (type) == VECTOR_TYPE
+ 	      && int_size_in_bytes (type) == 16)))
+     {
+@@ -6288,7 +6823,7 @@ function_arg (CUMULATIVE_ARGS *cum, enum
+     else
+       return gen_rtx_REG (mode, cum->vregno);
+   else if (TARGET_ALTIVEC_ABI
+-	   && (ALTIVEC_VECTOR_MODE (mode)
++	   && ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+ 	       || (type && TREE_CODE (type) == VECTOR_TYPE
+ 		   && int_size_in_bytes (type) == 16)))
+     {
+@@ -7212,10 +7747,13 @@ static const struct builtin_description 
+   { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
++  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v2df, "__builtin_altivec_vperm_2df", ALTIVEC_BUILTIN_VPERM_2DF },
++  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v2di, "__builtin_altivec_vperm_2di", ALTIVEC_BUILTIN_VPERM_2DI },
++  { MASK_ALTIVEC, CODE_FOR_vector_vselv4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
++  { MASK_ALTIVEC, CODE_FOR_vector_vselv4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
++  { MASK_ALTIVEC, CODE_FOR_vector_vselv8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
++  { MASK_ALTIVEC, CODE_FOR_vector_vselv16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
++  { MASK_ALTIVEC, CODE_FOR_vector_vselv2df, "__builtin_altivec_vsel_2df", ALTIVEC_BUILTIN_VSEL_2DF },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
+@@ -7289,18 +7827,18 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
++  { MASK_ALTIVEC, CODE_FOR_vector_eqv16qi, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
++  { MASK_ALTIVEC, CODE_FOR_vector_eqv8hi, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
++  { MASK_ALTIVEC, CODE_FOR_vector_eqv4si, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
++  { MASK_ALTIVEC, CODE_FOR_vector_eqv4sf, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
++  { MASK_ALTIVEC, CODE_FOR_vector_gev4sf, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtuv16qi, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtv16qi, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtuv8hi, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtv8hi, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtuv4si, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtv4si, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
++  { MASK_ALTIVEC, CODE_FOR_vector_gtv4sf, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
+   { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
+@@ -7331,7 +7869,7 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
+-  { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
++  { MASK_ALTIVEC, CODE_FOR_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
+   { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
+   { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
+@@ -7796,6 +8334,11 @@ static struct builtin_description bdesc_
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
+   { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
+ 
++  { MASK_ALTIVEC|MASK_VSX, CODE_FOR_floatv4siv4sf2, "__builtin_vec_float_sisf", VECTOR_BUILTIN_FLOAT_V4SI_V4SF },
++  { MASK_ALTIVEC|MASK_VSX, CODE_FOR_unsigned_floatv4siv4sf2, "__builtin_vec_uns_float_sisf", VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF },
++  { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fix_truncv4sfv4si2, "__builtin_vec_fix_sfsi", VECTOR_BUILTIN_FIX_V4SF_V4SI },
++  { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fixuns_truncv4sfv4si2, "__builtin_vec_fixuns_sfsi", VECTOR_BUILTIN_FIXUNS_V4SF_V4SI },
++
+   /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
+      end with SPE_BUILTIN_EVSUBFUSIAAW.  */
+   { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
+@@ -8352,16 +8895,16 @@ altivec_expand_ld_builtin (tree exp, rtx
+   switch (fcode)
+     {
+     case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
+-      icode = CODE_FOR_altivec_lvx_v16qi;
++      icode = CODE_FOR_vector_load_v16qi;
+       break;
+     case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
+-      icode = CODE_FOR_altivec_lvx_v8hi;
++      icode = CODE_FOR_vector_load_v8hi;
+       break;
+     case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
+-      icode = CODE_FOR_altivec_lvx_v4si;
++      icode = CODE_FOR_vector_load_v4si;
+       break;
+     case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
+-      icode = CODE_FOR_altivec_lvx_v4sf;
++      icode = CODE_FOR_vector_load_v4sf;
+       break;
+     default:
+       *expandedp = false;
+@@ -8405,16 +8948,16 @@ altivec_expand_st_builtin (tree exp, rtx
+   switch (fcode)
+     {
+     case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
+-      icode = CODE_FOR_altivec_stvx_v16qi;
++      icode = CODE_FOR_vector_store_v16qi;
+       break;
+     case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
+-      icode = CODE_FOR_altivec_stvx_v8hi;
++      icode = CODE_FOR_vector_store_v8hi;
+       break;
+     case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
+-      icode = CODE_FOR_altivec_stvx_v4si;
++      icode = CODE_FOR_vector_store_v4si;
+       break;
+     case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
+-      icode = CODE_FOR_altivec_stvx_v4sf;
++      icode = CODE_FOR_vector_store_v4sf;
+       break;
+     default:
+       *expandedp = false;
+@@ -9372,6 +9915,8 @@ rs6000_init_builtins (void)
+ {
+   V2SI_type_node = build_vector_type (intSI_type_node, 2);
+   V2SF_type_node = build_vector_type (float_type_node, 2);
++  V2DI_type_node = build_vector_type (intDI_type_node, 2);
++  V2DF_type_node = build_vector_type (double_type_node, 2);
+   V4HI_type_node = build_vector_type (intHI_type_node, 4);
+   V4SI_type_node = build_vector_type (intSI_type_node, 4);
+   V4SF_type_node = build_vector_type (float_type_node, 4);
+@@ -9404,7 +9949,10 @@ rs6000_init_builtins (void)
+   uintHI_type_internal_node = unsigned_intHI_type_node;
+   intSI_type_internal_node = intSI_type_node;
+   uintSI_type_internal_node = unsigned_intSI_type_node;
++  intDI_type_internal_node = intDI_type_node;
++  uintDI_type_internal_node = unsigned_intDI_type_node;
+   float_type_internal_node = float_type_node;
++  double_type_internal_node = double_type_node;
+   void_type_internal_node = void_type_node;
+ 
+   (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+@@ -9462,13 +10010,18 @@ rs6000_init_builtins (void)
+ 					    get_identifier ("__vector __pixel"),
+ 					    pixel_V8HI_type_node));
+ 
++  if (TARGET_VSX)
++    (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
++					      get_identifier ("__vector double"),
++					      V2DF_type_node));
++
+   if (TARGET_PAIRED_FLOAT)
+     paired_init_builtins ();
+   if (TARGET_SPE)
+     spe_init_builtins ();
+   if (TARGET_ALTIVEC)
+     altivec_init_builtins ();
+-  if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT)
++  if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT || TARGET_VSX)
+     rs6000_common_init_builtins ();
+   if (TARGET_PPC_GFXOPT)
+     {
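
Once the hunk above registers the type under TARGET_VSX, user code
compiled with -mvsx can spell V2DF values directly.  A minimal,
illustrative use (relying on GCC's generic element-wise vector
arithmetic):

/* Compile with a VSX-enabled gcc: gcc -mvsx -c example.c  */
__vector double a = { 1.0, 2.0 };
__vector double b = { 3.0, 4.0 };

__vector double
add_v2df (void)
{
  return a + b;                 /* element-wise: { 4.0, 6.0 } */
}
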
+@@ -10407,6 +10960,26 @@ rs6000_common_init_builtins (void)
+   tree int_ftype_v8hi_v8hi
+     = build_function_type_list (integer_type_node,
+ 				V8HI_type_node, V8HI_type_node, NULL_TREE);
++  tree v2df_ftype_v2df_v2df_v2df
++    = build_function_type_list (V2DF_type_node,
++				V2DF_type_node, V2DF_type_node,
++				V2DF_type_node, NULL_TREE);
++  tree v2di_ftype_v2di_v2di_v2di
++    = build_function_type_list (V2DI_type_node,
++				V2DI_type_node, V2DI_type_node,
++				V2DI_type_node, NULL_TREE);
++  tree v2df_ftype_v2df_v2df_v16qi
++    = build_function_type_list (V2DF_type_node,
++				V2DF_type_node, V2DF_type_node,
++				V16QI_type_node, NULL_TREE);
++  tree v2di_ftype_v2di_v2di_v16qi
++    = build_function_type_list (V2DI_type_node,
++				V2DI_type_node, V2DI_type_node,
++				V16QI_type_node, NULL_TREE);
++  tree v4sf_ftype_v4si
++    = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
++  tree v4si_ftype_v4sf
++    = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
+ 
+   /* Add the simple ternary operators.  */
+   d = bdesc_3arg;
+@@ -10443,6 +11016,12 @@ rs6000_common_init_builtins (void)
+ 	    case VOIDmode:
+ 	      type = opaque_ftype_opaque_opaque_opaque;
+ 	      break;
++	    case V2DImode:
++	      type = v2di_ftype_v2di_v2di_v2di;
++	      break;
++	    case V2DFmode:
++	      type = v2df_ftype_v2df_v2df_v2df;
++	      break;
+ 	    case V4SImode:
+ 	      type = v4si_ftype_v4si_v4si_v4si;
+ 	      break;
+@@ -10466,6 +11045,12 @@ rs6000_common_init_builtins (void)
+ 	{
+ 	  switch (mode0)
+ 	    {
++	    case V2DImode:
++	      type = v2di_ftype_v2di_v2di_v16qi;
++	      break;
++	    case V2DFmode:
++	      type = v2df_ftype_v2df_v2df_v16qi;
++	      break;
+ 	    case V4SImode:
+ 	      type = v4si_ftype_v4si_v4si_v16qi;
+ 	      break;
+@@ -10721,6 +11306,10 @@ rs6000_common_init_builtins (void)
+ 	type = v2si_ftype_v2sf;
+       else if (mode0 == V2SImode && mode1 == QImode)
+ 	type = v2si_ftype_char;
++      else if (mode0 == V4SImode && mode1 == V4SFmode)
++	type = v4si_ftype_v4sf;
++      else if (mode0 == V4SFmode && mode1 == V4SImode)
++	type = v4sf_ftype_v4si;
+       else
+ 	gcc_unreachable ();
+ 
+@@ -11601,13 +12190,101 @@ rs6000_instantiate_decls (void)
+     instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
+ }
+ 
++/* Given an rtx X being reloaded into a reg required to be
++   in class CLASS, return the class of reg to actually use.
++   In general this is just CLASS; but on some machines
++   in some cases it is preferable to use a more restrictive class.
++
++   On the RS/6000, we have to return NO_REGS when we want to reload a
++   floating-point CONST_DOUBLE to force it to be copied to memory.
++
++   We also don't want to reload integer values into floating-point
++   registers if we can at all help it.  In fact, this can
++   cause reload to die, if it tries to generate a reload of CTR
++   into a FP register and discovers it doesn't have the memory location
++   required.
++
++   ??? Would it be a good idea to have reload do the converse, that is
++   try to reload floating modes into FP registers if possible?
++ */
++
++enum reg_class
++rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
++{
++  enum machine_mode mode = GET_MODE (x);
++
++  if (TARGET_VSX && VSX_VECTOR_MODE (mode) && x == CONST0_RTX (mode)
++      && VSX_REG_CLASS_P (rclass))
++    return rclass;
++
++  if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode) && rclass == ALTIVEC_REGS
++      && easy_vector_constant (x, mode))
++    return rclass;
++
++  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
++    return NO_REGS;
++
++  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
++    return GENERAL_REGS;
++
++  /* For VSX, prefer the traditional registers.  */
++  if (rclass == VSX_REGS)
++    {
++      if (mode == DFmode)
++	return FLOAT_REGS;
++
++      if (ALTIVEC_VECTOR_MODE (mode))
++	return ALTIVEC_REGS;
++    }
++
++  return rclass;
++}
++
++/* If we are copying between FP or AltiVec registers and anything else, we need
++   a memory location.  The exception is when we are targeting ppc64 and the
++   move to/from fpr to gpr instructions are available.  Also, under VSX, you
++   can copy vector registers from the FP register set to the Altivec register
++   set and vice versa.  */
++
++bool
++rs6000_secondary_memory_needed (enum reg_class class1,
++				enum reg_class class2,
++				enum machine_mode mode)
++{
++  if (class1 == class2)
++    return false;
++
++  if (TARGET_VSX && VSX_MOVE_MODE (mode) && VSX_REG_CLASS_P (class1)
++      && VSX_REG_CLASS_P (class2))
++    return false;
++
++  if (class1 == FLOAT_REGS
++       && (!TARGET_MFPGPR || !TARGET_POWERPC64
++	   || ((mode != DFmode)
++	       && (mode != DDmode)
++	       && (mode != DImode))))
++    return true;
++
++  if (class2 == FLOAT_REGS
++      && (!TARGET_MFPGPR || !TARGET_POWERPC64
++	  || ((mode != DFmode)
++	      && (mode != DDmode)
++	      && (mode != DImode))))
++    return true;
++
++  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
++    return true;
++
++  return false;
++}
++
+ /* Return the register class of a scratch register needed to copy IN into
+    or out of a register in RCLASS in MODE.  If it can be done directly,
+    NO_REGS is returned.  */
+ 
+ enum reg_class
+ rs6000_secondary_reload_class (enum reg_class rclass,
+-			       enum machine_mode mode ATTRIBUTE_UNUSED,
++			       enum machine_mode mode,
+ 			       rtx in)
+ {
+   int regno;
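
Context for rs6000_secondary_memory_needed above: without direct
fpr<->gpr moves, and without VSX, which unifies the FP and AltiVec
files, a cross-file register copy has no single instruction, so reload
stages it through a stack slot.  A rough decision sketch; the names
are illustrative, not GCC's, and the -mmfpgpr special case is omitted:

enum regfile { GPRS, FPRS, VRS };

static int
needs_memory (enum regfile a, enum regfile b, int have_vsx)
{
  if (a == b)
    return 0;                   /* same file: plain move */
  if (have_vsx && a != GPRS && b != GPRS)
    return 0;                   /* VSX moves span FPRS <-> VRS */
  return 1;                     /* otherwise store + reload */
}
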
+@@ -11663,6 +12340,13 @@ rs6000_secondary_reload_class (enum reg_
+       && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
+     return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
+ 
++  /* Memory, and FP/altivec registers can go into fp/altivec registers under
++     VSX.  */
++  if (TARGET_VSX
++      && (regno == -1 || VSX_REGNO_P (regno))
++      && VSX_REG_CLASS_P (rclass))
++    return NO_REGS;
++
+   /* Memory, and AltiVec registers can go into AltiVec registers.  */
+   if ((regno == -1 || ALTIVEC_REGNO_P (regno))
+       && rclass == ALTIVEC_REGS)
+@@ -11676,6 +12360,28 @@ rs6000_secondary_reload_class (enum reg_
+   /* Otherwise, we need GENERAL_REGS.  */
+   return GENERAL_REGS;
+ }
++
++/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */
++
++bool
++rs6000_cannot_change_mode_class (enum machine_mode from,
++				 enum machine_mode to,
++				 enum reg_class rclass)
++{
++  return (GET_MODE_SIZE (from) != GET_MODE_SIZE (to)
++	  ? ((GET_MODE_SIZE (from) < 8 || GET_MODE_SIZE (to) < 8
++	      || TARGET_IEEEQUAD)
++	     && reg_classes_intersect_p (FLOAT_REGS, rclass))
++	  : (((TARGET_E500_DOUBLE
++	       && ((((to) == DFmode) + ((from) == DFmode)) == 1
++		   || (((to) == TFmode) + ((from) == TFmode)) == 1
++		   || (((to) == DDmode) + ((from) == DDmode)) == 1
++		   || (((to) == TDmode) + ((from) == TDmode)) == 1
++		   || (((to) == DImode) + ((from) == DImode)) == 1))
++	      || (TARGET_SPE
++		  && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1))
++	     && reg_classes_intersect_p (GENERAL_REGS, rclass)));
++}
+ 
+ /* Given a comparison operation, return the bit number in CCR to test.  We
+    know this is a valid comparison.
+@@ -12406,6 +13112,26 @@ print_operand (FILE *file, rtx x, int co
+       fprintf (file, "%d", i + 1);
+       return;
+ 
++    case 'x':
++      /* X is a FPR or Altivec register used in a VSX context.  */
++      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
++	output_operand_lossage ("invalid %%x value");
++      else
++	{
++	  int reg = REGNO (x);
++	  int vsx_reg = (FP_REGNO_P (reg)
++			 ? reg - 32
++			 : reg - FIRST_ALTIVEC_REGNO + 32);
++
++#ifdef TARGET_REGNAMES
++	  if (TARGET_REGNAMES)
++	    fprintf (file, "%%vs%d", vsx_reg);
++	  else
++#endif
++	    fprintf (file, "%d", vsx_reg);
++	}
++      return;
++
+     case 'X':
+       if (GET_CODE (x) == MEM
+ 	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
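
The new %x output modifier above renumbers registers into the unified
VSX space: the 64 VSX registers overlay the FPRs as vs0-vs31 and the
AltiVec registers as vs32-vs63.  A standalone sketch of the mapping,
using GCC's internal numbering where the FPRs are regnos 32..63; the
AltiVec base regno is target-defined, so it is a parameter here:

static int
vsx_regno (int regno, int first_altivec_regno)
{
  if (regno >= 32 && regno <= 63)
    return regno - 32;                      /* FPR -> vs0..vs31 */
  return regno - first_altivec_regno + 32;  /* VR  -> vs32..vs63 */
}
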
+@@ -12518,13 +13244,16 @@ print_operand (FILE *file, rtx x, int co
+ 
+ 	    /* Fall through.  Must be [reg+reg].  */
+ 	  }
+-	if (TARGET_ALTIVEC
++	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
+ 	    && GET_CODE (tmp) == AND
+ 	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
+ 	    && INTVAL (XEXP (tmp, 1)) == -16)
+ 	  tmp = XEXP (tmp, 0);
++	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
++		 && GET_CODE (tmp) == PRE_MODIFY)
++	  tmp = XEXP (tmp, 1);
+ 	if (GET_CODE (tmp) == REG)
+-	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
++	  fprintf (file, "%s,%s", reg_names[0], reg_names[REGNO (tmp)]);
+ 	else
+ 	  {
+ 	    if (!GET_CODE (tmp) == PLUS
+@@ -13296,55 +14025,62 @@ output_e500_flip_gt_bit (rtx dst, rtx sr
+   return string;
+ }
+ 
+-/* Return insn index for the vector compare instruction for given CODE,
+-   and DEST_MODE, OP_MODE. Return INSN_NOT_AVAILABLE if valid insn is
+-   not available.  */
++/* Return insn for VSX comparisons.  */
+ 
+-static int
+-get_vec_cmp_insn (enum rtx_code code,
+-		  enum machine_mode dest_mode,
+-		  enum machine_mode op_mode)
++static rtx
++rs6000_emit_vector_compare_vsx (enum rtx_code code,
++				rtx mask,
++				rtx op0,
++				rtx op1)
+ {
+-  if (!TARGET_ALTIVEC)
+-    return INSN_NOT_AVAILABLE;
+-
+   switch (code)
+     {
++    default:
++      break;
++
+     case EQ:
+-      if (dest_mode == V16QImode && op_mode == V16QImode)
+-	return UNSPEC_VCMPEQUB;
+-      if (dest_mode == V8HImode && op_mode == V8HImode)
+-	return UNSPEC_VCMPEQUH;
+-      if (dest_mode == V4SImode && op_mode == V4SImode)
+-	return UNSPEC_VCMPEQUW;
+-      if (dest_mode == V4SImode && op_mode == V4SFmode)
+-	return UNSPEC_VCMPEQFP;
++    case GT:
++    case GE:
++      emit_insn (gen_rtx_SET (VOIDmode,
++			      mask,
++			      gen_rtx_fmt_ee (code, GET_MODE (mask),
++					      op0,
++					      op1)));
++      return mask;
++    }
++
++  return NULL_RTX;
++}
++
++/* Return insn for Altivec comparisons.  */
++
++static rtx
++rs6000_emit_vector_compare_altivec (enum rtx_code code,
++				    rtx mask,
++				    rtx op0,
++				    rtx op1)
++{
++  switch (code)
++    {
++    default:
+       break;
++
+     case GE:
+-      if (dest_mode == V4SImode && op_mode == V4SFmode)
+-	return UNSPEC_VCMPGEFP;
++      if (GET_MODE (mask) != V4SFmode)
++	return NULL_RTX;
++      /* fall through */
++    case EQ:
+     case GT:
+-      if (dest_mode == V16QImode && op_mode == V16QImode)
+-	return UNSPEC_VCMPGTSB;
+-      if (dest_mode == V8HImode && op_mode == V8HImode)
+-	return UNSPEC_VCMPGTSH;
+-      if (dest_mode == V4SImode && op_mode == V4SImode)
+-	return UNSPEC_VCMPGTSW;
+-      if (dest_mode == V4SImode && op_mode == V4SFmode)
+-	return UNSPEC_VCMPGTFP;
+-      break;
+     case GTU:
+-      if (dest_mode == V16QImode && op_mode == V16QImode)
+-	return UNSPEC_VCMPGTUB;
+-      if (dest_mode == V8HImode && op_mode == V8HImode)
+-	return UNSPEC_VCMPGTUH;
+-      if (dest_mode == V4SImode && op_mode == V4SImode)
+-	return UNSPEC_VCMPGTUW;
+-      break;
+-    default:
+-      break;
++      emit_insn (gen_rtx_SET (VOIDmode,
++			      mask,
++			      gen_rtx_fmt_ee (code, GET_MODE (mask),
++					      op0,
++					      op1)));
++      return mask;
+     }
+-  return INSN_NOT_AVAILABLE;
++
++  return NULL_RTX;
+ }
+ 
+ /* Emit vector compare for operands OP0 and OP1 using code RCODE.
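
The hunk below restructures rs6000_emit_vector_compare around a small
set of hardware predicates: EQ, GT and GTU are emitted directly (plus
GE for float), and every other predicate is reduced to them by swapping
operands, inverting an equality, or OR-ing one in.  A
compiler-independent sketch of that reduction table; the names are
illustrative, and NE is shown in its simplest form:

enum vcmp { EQ, NE, GT, GE, LT, LE, GTU, GEU, LTU, LEU };

struct plan
{
  enum vcmp prim;       /* hardware predicate to emit */
  int swap_operands;    /* compare (b, a) instead of (a, b) */
  int invert;           /* complement the result mask */
  int or_equal;         /* OR the mask with an EQ mask */
};

static struct plan
reduce (enum vcmp c)
{
  struct plan p = { c, 0, 0, 0 };
  switch (c)
    {
    case LT:  p.prim = GT;  p.swap_operands = 1; break; /* a<b == b>a */
    case LTU: p.prim = GTU; p.swap_operands = 1; break;
    case NE:  p.prim = EQ;  p.invert = 1; break;        /* ~(a==b) */
    case GE:  p.prim = GT;  p.or_equal = 1; break;      /* a>b | a==b */
    case GEU: p.prim = GTU; p.or_equal = 1; break;
    case LE:  p.prim = GT;  p.swap_operands = 1; p.or_equal = 1; break;
    case LEU: p.prim = GTU; p.swap_operands = 1; p.or_equal = 1; break;
    default:  break;      /* EQ, GT, GTU are native */
    }
  return p;
}
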
+@@ -13355,129 +14091,111 @@ rs6000_emit_vector_compare (enum rtx_cod
+ 			    rtx op0, rtx op1,
+ 			    enum machine_mode dmode)
+ {
+-  int vec_cmp_insn;
+   rtx mask;
+-  enum machine_mode dest_mode;
+-  enum machine_mode op_mode = GET_MODE (op1);
++  bool swap_operands = false;
++  bool try_again = false;
+ 
+-  gcc_assert (TARGET_ALTIVEC);
++  gcc_assert (TARGET_ALTIVEC || TARGET_VSX);
+   gcc_assert (GET_MODE (op0) == GET_MODE (op1));
+ 
+-  /* Floating point vector compare instructions uses destination V4SImode.
+-     Move destination to appropriate mode later.  */
+-  if (dmode == V4SFmode)
+-    dest_mode = V4SImode;
+-  else
+-    dest_mode = dmode;
++  mask = gen_reg_rtx (dmode);
+ 
+-  mask = gen_reg_rtx (dest_mode);
+-  vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
++  /* Try for VSX before Altivec.  */
++  if (TARGET_VSX && VSX_VECTOR_MODE (dmode))
++    {
++      rtx vsx = rs6000_emit_vector_compare_vsx (rcode, mask, op0, op1);
++      if (vsx)
++	return vsx;
++    }
++  else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (dmode))
++    {
++      rtx av = rs6000_emit_vector_compare_altivec (rcode, mask, op0, op1);
++      if (av)
++	return av;
++    }
+ 
+-  if (vec_cmp_insn == INSN_NOT_AVAILABLE)
++  switch (rcode)
+     {
+-      bool swap_operands = false;
+-      bool try_again = false;
+-      switch (rcode)
+-	{
+-	case LT:
+-	  rcode = GT;
+-	  swap_operands = true;
+-	  try_again = true;
+-	  break;
+-	case LTU:
+-	  rcode = GTU;
+-	  swap_operands = true;
+-	  try_again = true;
+-	  break;
+-	case NE:
+-	case UNLE:
+-	case UNLT:
+-	case UNGE:
+-	case UNGT:
+-	  /* Invert condition and try again.
+-	     e.g., A != B becomes ~(A==B).  */
+-	  {
+-	    enum rtx_code rev_code;
+-	    enum insn_code nor_code;
+-	    rtx eq_rtx;
+-
+-	    rev_code = reverse_condition_maybe_unordered (rcode);
+-	    eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
+-						 dest_mode);
+-
+-	    nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
+-	    gcc_assert (nor_code != CODE_FOR_nothing);
+-	    emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
++    case LT:
++      rcode = GT;
++      swap_operands = true;
++      try_again = true;
++      break;
++    case LTU:
++      rcode = GTU;
++      swap_operands = true;
++      try_again = true;
++      break;
++    case NE:
++    case UNLE:
++    case UNLT:
++    case UNGE:
++    case UNGT:
++      /* Invert condition and try again.
++	 e.g., A != B becomes ~(A==B).  */
++      {
++	enum rtx_code rev_code;
++	enum insn_code nor_code;
++	rtx eq_rtx;
++
++	rev_code = reverse_condition_maybe_unordered (rcode);
++	eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
++
++	nor_code = optab_handler (one_cmpl_optab, (int)dmode)->insn_code;
++	gcc_assert (nor_code != CODE_FOR_nothing);
++	emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
++	return mask;
++      }
++      break;
++    case GE:
++    case GEU:
++    case LE:
++    case LEU:
++      /* Try GT/GTU/LT/LTU OR EQ.  */
++      {
++	rtx c_rtx, eq_rtx;
++	enum insn_code ior_code;
++	enum rtx_code new_code;
+ 
+-	    if (dmode != dest_mode)
+-	      {
+-		rtx temp = gen_reg_rtx (dest_mode);
+-		convert_move (temp, mask, 0);
+-		return temp;
+-	      }
+-	    return mask;
+-	  }
+-	  break;
+-	case GE:
+-	case GEU:
+-	case LE:
+-	case LEU:
+-	  /* Try GT/GTU/LT/LTU OR EQ */
++	switch (rcode)
+ 	  {
+-	    rtx c_rtx, eq_rtx;
+-	    enum insn_code ior_code;
+-	    enum rtx_code new_code;
+-
+-	    switch (rcode)
+-	      {
+-	      case  GE:
+-		new_code = GT;
+-		break;
+-
+-	      case GEU:
+-		new_code = GTU;
+-		break;
++	  case  GE:
++	    new_code = GT;
++	    break;
+ 
+-	      case LE:
+-		new_code = LT;
+-		break;
++	  case GEU:
++	    new_code = GTU;
++	    break;
+ 
+-	      case LEU:
+-		new_code = LTU;
+-		break;
++	  case LE:
++	    new_code = LT;
++	    break;
+ 
+-	      default:
+-		gcc_unreachable ();
+-	      }
++	  case LEU:
++	    new_code = LTU;
++	    break;
+ 
+-	    c_rtx = rs6000_emit_vector_compare (new_code,
+-						op0, op1, dest_mode);
+-	    eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
+-						 dest_mode);
+-
+-	    ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
+-	    gcc_assert (ior_code != CODE_FOR_nothing);
+-	    emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
+-	    if (dmode != dest_mode)
+-	      {
+-		rtx temp = gen_reg_rtx (dest_mode);
+-		convert_move (temp, mask, 0);
+-		return temp;
+-	      }
+-	    return mask;
++	  default:
++	    gcc_unreachable ();
+ 	  }
+-	  break;
+-	default:
+-	  gcc_unreachable ();
+-	}
+ 
+-      if (try_again)
+-	{
+-	  vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+-	  /* You only get two chances.  */
+-	  gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
+-	}
++	c_rtx = rs6000_emit_vector_compare (new_code,
++					    op0, op1, dmode);
++	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
++					     dmode);
++
++	ior_code = optab_handler (ior_optab, (int)dmode)->insn_code;
++	gcc_assert (ior_code != CODE_FOR_nothing);
++	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
++	return mask;
++      }
++      break;
++    default:
++      gcc_unreachable ();
++    }
+ 
++  if (try_again)
++    {
+       if (swap_operands)
+ 	{
+ 	  rtx tmp;
+@@ -13485,69 +14203,23 @@ rs6000_emit_vector_compare (enum rtx_cod
+ 	  op0 = op1;
+ 	  op1 = tmp;
+ 	}
+-    }
+-
+-  emit_insn (gen_rtx_SET (VOIDmode, mask,
+-			  gen_rtx_UNSPEC (dest_mode,
+-					  gen_rtvec (2, op0, op1),
+-					  vec_cmp_insn)));
+-  if (dmode != dest_mode)
+-    {
+-      rtx temp = gen_reg_rtx (dest_mode);
+-      convert_move (temp, mask, 0);
+-      return temp;
+-    }
+-  return mask;
+-}
+-
+-/* Return vector select instruction for MODE. Return INSN_NOT_AVAILABLE, if
+-   valid insn doesn exist for given mode.  */
+ 
+-static int
+-get_vsel_insn (enum machine_mode mode)
+-{
+-  switch (mode)
+-    {
+-    case V4SImode:
+-      return UNSPEC_VSEL4SI;
+-      break;
+-    case V4SFmode:
+-      return UNSPEC_VSEL4SF;
+-      break;
+-    case V8HImode:
+-      return UNSPEC_VSEL8HI;
+-      break;
+-    case V16QImode:
+-      return UNSPEC_VSEL16QI;
+-      break;
+-    default:
+-      return INSN_NOT_AVAILABLE;
+-      break;
++      if (TARGET_VSX && VSX_VECTOR_MODE (dmode))
++	{
++	  rtx vsx = rs6000_emit_vector_compare_vsx (rcode, mask, op0, op1);
++	  if (vsx)
++	    return vsx;
++	}
++      else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (dmode))
++	{
++	  rtx av = rs6000_emit_vector_compare_altivec (rcode, mask, op0, op1);
++	  if (av)
++	    return av;
++	}
+     }
+-  return INSN_NOT_AVAILABLE;
+-}
+-
+-/* Emit vector select insn where DEST is destination using
+-   operands OP1, OP2 and MASK.  */
+-
+-static void
+-rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
+-{
+-  rtx t, temp;
+-  enum machine_mode dest_mode = GET_MODE (dest);
+-  int vsel_insn_index  = get_vsel_insn (GET_MODE (dest));
+ 
+-  temp = gen_reg_rtx (dest_mode);
+-
+-  /* For each vector element, select op1 when mask is 1 otherwise
+-     select op2.  */
+-  t = gen_rtx_SET (VOIDmode, temp,
+-		   gen_rtx_UNSPEC (dest_mode,
+-				   gen_rtvec (3, op2, op1, mask),
+-				   vsel_insn_index));
+-  emit_insn (t);
+-  emit_move_insn (dest, temp);
+-  return;
++  /* You only get two chances.  */
++  gcc_unreachable ();
+ }
+ 
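
For readers following the rewrite: the fallback above reduces every comparison
code to the ones the vector units handle natively (EQ/GT/GTU, plus GE where
available): LT/LTU swap operands, NE and the unordered codes complement an
inverted compare, and GE/GEU/LE/LEU become "GT-or-EQ".  A rough stand-alone
sketch of those rewrite rules (hypothetical names, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative mirror of the fallback in rs6000_emit_vector_compare.  */
    enum cmp { EQ, GT, GTU, GE, GEU, LT, LTU, LE, LEU, NE };

    struct rewrite {
      enum cmp code;   /* comparison to emit instead */
      bool swap;       /* swap the operands first */
      bool invert;     /* complement the resulting mask */
      bool or_eq;      /* OR the resulting mask with an EQ mask */
    };

    static struct rewrite rewrite_cmp (enum cmp c)
    {
      struct rewrite r = { c, false, false, false };
      switch (c)
        {
        case LT:  r.code = GT;  r.swap = true;   break; /* a<b == b>a */
        case LTU: r.code = GTU; r.swap = true;   break;
        case NE:  r.code = EQ;  r.invert = true; break; /* a!=b == ~(a==b) */
        case GE:  r.code = GT;  r.or_eq = true;  break; /* a>=b == a>b | a==b */
        case GEU: r.code = GTU; r.or_eq = true;  break;
        case LE:  r.code = LT;  r.or_eq = true;  break; /* LT then recurses */
        case LEU: r.code = LTU; r.or_eq = true;  break;
        default:  break;                                /* natively supported */
        }
      return r;
    }

    int main (void)
    {
      struct rewrite r = rewrite_cmp (LE);
      printf ("LE -> code=%d or_eq=%d\n", (int) r.code, (int) r.or_eq);
      return 0;
    }
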
+ /* Emit vector conditional expression.
+@@ -13562,15 +14234,29 @@ rs6000_emit_vector_cond_expr (rtx dest, 
+   enum rtx_code rcode = GET_CODE (cond);
+   rtx mask;
+ 
+-  if (!TARGET_ALTIVEC)
++  if (!TARGET_ALTIVEC && !TARGET_VSX)
+     return 0;
+ 
+   /* Get the vector mask for the given relational operations.  */
+   mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
+ 
+-  rs6000_emit_vector_select (dest, op1, op2, mask);
++  if (!mask)
++    return 0;
+ 
+-  return 1;
++  if ((TARGET_VSX && VSX_VECTOR_MOVE_MODE (dest_mode))
++      || (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (dest_mode)))
++    {
++      rtx cond2 = gen_rtx_fmt_ee (NE, VOIDmode, mask, const0_rtx);
++      emit_insn (gen_rtx_SET (VOIDmode,
++			      dest,
++			      gen_rtx_IF_THEN_ELSE (dest_mode,
++						    cond2,
++						    op1,
++						    op2)));
++      return 1;
++    }
++
++  return 0;
+ }
+ 
+ /* Emit a conditional move: move TRUE_COND to DEST if OP of the
+@@ -13766,8 +14452,8 @@ rs6000_emit_int_cmove (rtx dest, rtx op,
+ {
+   rtx condition_rtx, cr;
+ 
+-  /* All isel implementations thus far are 32-bits.  */
+-  if (GET_MODE (rs6000_compare_op0) != SImode)
++  if (GET_MODE (rs6000_compare_op0) != SImode
++      && (!TARGET_POWERPC64 || GET_MODE (rs6000_compare_op0) != DImode))
+     return 0;
+ 
+   /* We still have to do the compare, because isel doesn't do a
+@@ -13776,12 +14462,24 @@ rs6000_emit_int_cmove (rtx dest, rtx op,
+   condition_rtx = rs6000_generate_compare (GET_CODE (op));
+   cr = XEXP (condition_rtx, 0);
+ 
+-  if (GET_MODE (cr) == CCmode)
+-    emit_insn (gen_isel_signed (dest, condition_rtx,
+-				true_cond, false_cond, cr));
++  if (GET_MODE (rs6000_compare_op0) == SImode)
++    {
++      if (GET_MODE (cr) == CCmode)
++	emit_insn (gen_isel_signed_si (dest, condition_rtx,
++				       true_cond, false_cond, cr));
++      else
++	emit_insn (gen_isel_unsigned_si (dest, condition_rtx,
++					 true_cond, false_cond, cr));
++    }
+   else
+-    emit_insn (gen_isel_unsigned (dest, condition_rtx,
+-				  true_cond, false_cond, cr));
++    {
++      if (GET_MODE (cr) == CCmode)
++	emit_insn (gen_isel_signed_di (dest, condition_rtx,
++				       true_cond, false_cond, cr));
++      else
++	emit_insn (gen_isel_unsigned_di (dest, condition_rtx,
++					 true_cond, false_cond, cr));
++    }
+ 
+   return 1;
+ }
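
The isel change above is purely about width: the expander now accepts DImode
compares on TARGET_POWERPC64 and dispatches to _si/_di pattern variants
instead of a single pair.  In source terms, branchless selects of both widths
become isel candidates (plain C, for illustration only):

    /* Conditional moves rs6000_emit_int_cmove can now expand with isel in
       both SImode and, on 64-bit targets, DImode.  */
    int pick32 (int a, int b, int t, int f)
    {
      return a < b ? t : f;
    }

    long pick64 (long a, long b, long t, long f)
    {
      return a < b ? t : f;
    }
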
+@@ -13808,6 +14506,15 @@ rs6000_emit_minmax (rtx dest, enum rtx_c
+   enum rtx_code c;
+   rtx target;
+ 
++  /* VSX/altivec have direct min/max insns.  */
++  if ((code == SMAX || code == SMIN) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode))
++    {
++      emit_insn (gen_rtx_SET (VOIDmode,
++			      dest,
++			      gen_rtx_fmt_ee (code, mode, op0, op1)));
++      return;
++    }
++
+   if (code == SMAX || code == SMIN)
+     c = GE;
+   else
+@@ -15785,6 +16492,7 @@ emit_frame_save (rtx frame_reg, rtx fram
+ 
+   /* Some cases that need register indexed addressing.  */
+   if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
++      || (TARGET_VSX && VSX_VECTOR_MODE (mode))
+       || (TARGET_E500_DOUBLE && mode == DFmode)
+       || (TARGET_SPE_ABI
+ 	  && SPE_VECTOR_MODE (mode)
+@@ -19320,6 +20028,7 @@ rs6000_issue_rate (void)
+   case CPU_POWER4:
+   case CPU_POWER5:
+   case CPU_POWER6:
++  case CPU_POWER7:
+     return 5;
+   default:
+     return 1;
+@@ -19921,6 +20630,41 @@ insn_must_be_first_in_group (rtx insn)
+           break;
+         }
+       break;
++    case PROCESSOR_POWER7:
++      type = get_attr_type (insn);
++
++      switch (type)
++        {
++        case TYPE_CR_LOGICAL:
++        case TYPE_MFCR:
++        case TYPE_MFCRF:
++        case TYPE_MTCR:
++        case TYPE_IDIV:
++        case TYPE_LDIV:
++        case TYPE_COMPARE:
++        case TYPE_DELAYED_COMPARE:
++        case TYPE_VAR_DELAYED_COMPARE:
++        case TYPE_ISYNC:
++        case TYPE_LOAD_L:
++        case TYPE_STORE_C:
++        case TYPE_LOAD_U:
++        case TYPE_LOAD_UX:
++        case TYPE_LOAD_EXT:
++        case TYPE_LOAD_EXT_U:
++        case TYPE_LOAD_EXT_UX:
++        case TYPE_STORE_U:
++        case TYPE_STORE_UX:
++        case TYPE_FPLOAD_U:
++        case TYPE_FPLOAD_UX:
++        case TYPE_FPSTORE_U:
++        case TYPE_FPSTORE_UX:
++        case TYPE_MFJMPR:
++        case TYPE_MTJMPR:
++          return true;
++        default:
++          break;
++        }
++      break;
+     default:
+       break;
+     }
+@@ -19982,6 +20726,23 @@ insn_must_be_last_in_group (rtx insn)
+         break;
+     }
+     break;
++  case PROCESSOR_POWER7:
++    type = get_attr_type (insn);
++
++    switch (type)
++      {
++      case TYPE_ISYNC:
++      case TYPE_SYNC:
++      case TYPE_LOAD_L:
++      case TYPE_STORE_C:
++      case TYPE_LOAD_EXT_U:
++      case TYPE_LOAD_EXT_UX:
++      case TYPE_STORE_UX:
++        return true;
++      default:
++        break;
++    }
++    break;
+   default:
+     break;
+   }
+@@ -20554,8 +21315,8 @@ rs6000_handle_altivec_attribute (tree *n
+   else if (type == long_long_unsigned_type_node
+            || type == long_long_integer_type_node)
+     error ("use of %<long long%> in AltiVec types is invalid");
+-  else if (type == double_type_node)
+-    error ("use of %<double%> in AltiVec types is invalid");
++  else if (type == double_type_node && !TARGET_VSX)
++    error ("use of %<double%> in AltiVec types is invalid without -mvsx");
+   else if (type == long_double_type_node)
+     error ("use of %<long double%> in AltiVec types is invalid");
+   else if (type == boolean_type_node)
+@@ -20581,6 +21342,7 @@ rs6000_handle_altivec_attribute (tree *n
+ 	  result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ 	  break;
+ 	case SFmode: result = V4SF_type_node; break;
++	case DFmode: result = V2DF_type_node; break;
+ 	  /* If the user says 'vector int bool', we may be handed the 'bool'
+ 	     attribute _before_ the 'vector' attribute, and so select the
+ 	     proper type in the 'b' case below.  */
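
A small usage note on the attribute change above: the blanket rejection of
'double' in AltiVec types is now conditional, so with VSX enabled the
following declaration is accepted and mapped to V2DF (illustrative snippet;
needs the context-sensitive 'vector' keyword, i.e. -maltivec, plus -mvsx):

    /* Accepted after this change when compiled with -maltivec -mvsx;
       without -mvsx it still errors, now with a hint to enable VSX.  */
    vector double two_doubles;
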
+@@ -22116,7 +22878,7 @@ rs6000_register_move_cost (enum machine_
+       if (! reg_classes_intersect_p (to, GENERAL_REGS))
+ 	from = to;
+ 
+-      if (from == FLOAT_REGS || from == ALTIVEC_REGS)
++      if (from == FLOAT_REGS || from == ALTIVEC_REGS || from == VSX_REGS)
+ 	return (rs6000_memory_move_cost (mode, from, 0)
+ 		+ rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
+ 
+@@ -22136,6 +22898,12 @@ rs6000_register_move_cost (enum machine_
+ 	return 2 * hard_regno_nregs[0][mode];
+     }
+ 
++  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
++  else if (TARGET_VSX
++	   && ((from == VSX_REGS || from == FLOAT_REGS || from == ALTIVEC_REGS)
++	       || (to == VSX_REGS || to == FLOAT_REGS || to == ALTIVEC_REGS)))
++    return 2;
++
+   /* Moving between two similar registers is just one instruction.  */
+   else if (reg_classes_intersect_p (to, from))
+     return (mode == TFmode || mode == TDmode) ? 4 : 2;
+@@ -22376,8 +23144,8 @@ rs6000_emit_swrsqrtsf (rtx dst, rtx src)
+   emit_label (XEXP (label, 0));
+ }
+ 
+-/* Emit popcount intrinsic on TARGET_POPCNTB targets.  DST is the
+-   target, and SRC is the argument operand.  */
++/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
++   (Power7) targets.  DST is the target, and SRC is the argument operand.  */
+ 
+ void
+ rs6000_emit_popcount (rtx dst, rtx src)
+@@ -22385,6 +23153,16 @@ rs6000_emit_popcount (rtx dst, rtx src)
+   enum machine_mode mode = GET_MODE (dst);
+   rtx tmp1, tmp2;
+ 
++  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
++  if (TARGET_POPCNTD)
++    {
++      if (mode == SImode)
++	emit_insn (gen_popcntwsi2 (dst, src));
++      else
++	emit_insn (gen_popcntddi2 (dst, src));
++      return;
++    }
++
+   tmp1 = gen_reg_rtx (mode);
+ 
+   if (mode == SImode)
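
At the source level the entry point for the new fast path is simply the
popcount builtin; with -mpopcntd the expander emits one instruction, while
older CPUs keep the synthesized popcntb sequence that follows in this
function (caller-side C sketch, nothing patch-specific):

    /* Expected to become a single popcntw/popcntd on -mcpu=power7;
       otherwise the popcntb multiply/shift fallback is generated.  */
    int pc32 (unsigned int x)
    {
      return __builtin_popcount (x);
    }

    int pc64 (unsigned long long x)
    {
      return __builtin_popcountll (x);
    }
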
+@@ -22797,7 +23575,7 @@ rs6000_vector_mode_supported_p (enum mac
+   if (TARGET_SPE && SPE_VECTOR_MODE (mode))
+     return true;
+ 
+-  else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
++  else if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode))
+     return true;
+ 
+   else
+--- gcc/config/rs6000/vsx.md	(.../trunk)	(revision 0)
++++ gcc/config/rs6000/vsx.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -0,0 +1,864 @@
++;; VSX patterns.
++;; Copyright (C) 2009
++;; Free Software Foundation, Inc.
++;; Contributed by Michael Meissner <meissner at linux.vnet.ibm.com>
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Iterator for vector floating point types supported by VSX
++(define_mode_iterator VSX_F [V4SF V2DF])
++
++;; Iterator for logical types supported by VSX
++(define_mode_iterator VSX_L [V16QI V8HI V4SI V2DI V4SF V2DF TI])
++
++;; Map into the appropriate load/store name based on the type
++(define_mode_attr VSm  [(V16QI "vw4")
++			(V8HI  "vw4")
++			(V4SI  "vw4")
++			(V4SF  "vw4")
++			(V2DF  "vd2")
++			(V2DI  "vd2")
++			(TI    "vw4")])
++
++;; Map into the appropriate suffix based on the type
++(define_mode_attr VSs	[(V16QI "sp")
++			 (V8HI  "sp")
++			 (V4SI  "sp")
++			 (V4SF  "sp")
++			 (V2DF  "dp")
++			 (V2DI  "dp")
++			 (TI    "sp")])
++
++;; Map into the register class used
++(define_mode_attr VSr	[(V16QI "v")
++			 (V8HI  "v")
++			 (V4SI  "v")
++			 (V4SF  "wf")
++			 (V2DI  "wd")
++			 (V2DF  "wd")
++			 (TI    "wd")])
++
++;; Same size integer type for floating point data
++(define_mode_attr VSi [(V4SF  "v4si")
++		       (V2DF  "v2di")])
++
++(define_mode_attr VSI [(V4SF  "V4SI")
++		       (V2DF  "V2DI")])
++
++;; Word size for same size conversion
++(define_mode_attr VSc [(V4SF "w")
++		       (V2DF "d")])
++
++;; Bitsize for DF load with update
++(define_mode_attr VSbit [(SI "32")
++			 (DI "64")])
++
++(define_constants
++  [(UNSPEC_VSX_CONCAT_V2DF	500)])
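
A note on the angle-bracket placeholders used throughout this file: each
define_insn written with a mode iterator is instantiated once per mode, and
<VSm>/<VSs>/<VSr> are replaced per mode from the tables above, so a template
such as "xvadd<VSs>" becomes xvaddsp for V4SF and xvadddp for V2DF.  A trivial
stand-alone model of that suffix substitution (hypothetical helper, not how
the generator tools are implemented):

    #include <stdio.h>
    #include <string.h>

    /* Per-mode suffix, mirroring the VSs mode attribute above.  */
    static const char *vss (const char *mode)
    {
      return (!strcmp (mode, "V2DF") || !strcmp (mode, "V2DI")) ? "dp" : "sp";
    }

    int main (void)
    {
      const char *modes[] = { "V4SF", "V2DF" };
      for (int i = 0; i < 2; i++)
        printf ("%s: xvadd%s\n", modes[i], vss (modes[i]));
      return 0;
    }
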
++
++;; VSX moves
++(define_insn "*vsx_mov<mode>"
++  [(set (match_operand:VSX_L 0 "nonimmediate_operand" "=Z,<VSr>,<VSr>,?Z,?wa,?wa,*o,*r,*r,<VSr>,?wa,v")
++	(match_operand:VSX_L 1 "input_operand" "<VSr>,Z,<VSr>,wa,Z,wa,r,o,r,j,j,W"))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)
++   && (register_operand (operands[0], <MODE>mode) 
++       || register_operand (operands[1], <MODE>mode))"
++{
++  switch (which_alternative)
++    {
++    case 0:
++    case 3:
++      return "stx<VSm>%U0x %x1,%y0";
++
++    case 1:
++    case 4:
++      return "lx<VSm>%U0x %x0,%y1";
++
++    case 2:
++    case 5:
++      return "xvmov<VSs> %x0,%x1";
++
++    case 6:
++    case 7:
++    case 8:
++      return "#";
++
++    case 9:
++    case 10:
++      return "xxlxor %x0,%x0,%x0";
++
++    case 11:
++      return output_vec_const_move (operands);
++
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "type" "vecstore,vecload,vecsimple,vecstore,vecload,vecsimple,store,load,*,vecsimple,vecsimple,*")])
++
++;; Load/store with update
++;; Define insns that do load or store with update.  Because VSX only has
++;; reg+reg addressing, pre-decrement or pre-increment is unlikely to be
++;; generated.
++;;
++;; In all these cases, we use operands 0 and 1 for the register being
++;; incremented because those are the operands that local-alloc will
++;; tie and these are the pair most likely to be tieable (and the ones
++;; that will benefit the most).
++
++(define_insn "*vsx_load<mode>_update64"
++  [(set (match_operand:VSX_L 3 "vsx_register_operand" "=<VSr>,?wa")
++	(mem:VSX_L (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
++			    (match_operand:DI 2 "gpc_reg_operand" "r,r"))))
++   (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
++	(plus:DI (match_dup 1)
++		 (match_dup 2)))]
++  "TARGET_64BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (<MODE>mode)"
++  "lx<VSm>ux %x3,%0,%2"
++  [(set_attr "type" "vecload")])
++
++(define_insn "*vsx_load<mode>_update32"
++  [(set (match_operand:VSX_L 3 "vsx_register_operand" "=<VSr>,?wa")
++	(mem:VSX_L (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
++			    (match_operand:SI 2 "gpc_reg_operand" "r,r"))))
++   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
++	(plus:SI (match_dup 1)
++		 (match_dup 2)))]
++  "TARGET_32BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (<MODE>mode)"
++  "lx<VSm>ux %x3,%0,%2"
++  [(set_attr "type" "vecload")])
++
++(define_insn "*vsx_store<mode>_update64"
++  [(set (mem:VSX_L (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
++			    (match_operand:DI 2 "gpc_reg_operand" "r,r")))
++	(match_operand:VSX_L 3 "gpc_reg_operand" "<VSr>,?wa"))
++   (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
++	(plus:DI (match_dup 1)
++		 (match_dup 2)))]
++  "TARGET_64BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (<MODE>mode)"
++  "stx<VSm>ux %x3,%0,%2"
++  [(set_attr "type" "vecstore")])
++
++(define_insn "*vsx_store<mode>_update32"
++  [(set (mem:VSX_L (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
++			    (match_operand:SI 2 "gpc_reg_operand" "r,r")))
++	(match_operand:VSX_L 3 "gpc_reg_operand" "<VSr>,?wa"))
++   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
++	(plus:SI (match_dup 1)
++		 (match_dup 2)))]
++  "TARGET_32BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (<MODE>mode)"
++  "stx<VSm>ux %x3,%0,%2"
++  [(set_attr "type" "vecstore")])
++
++(define_insn "*vsx_loaddf_update<VSbit>"
++  [(set (match_operand:DF 3 "vsx_register_operand" "=ws,?wa")
++	(mem:DF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
++			(match_operand:P 2 "gpc_reg_operand" "r,r"))))
++   (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
++	(plus:P (match_dup 1)
++		(match_dup 2)))]
++  "TARGET_<VSbit>BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (DFmode)"
++  "lxsdux %x3,%0,%2"
++  [(set_attr "type" "vecload")])
++
++(define_insn "*vsx_storedf_update<VSbit>"
++  [(set (mem:DF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
++			(match_operand:P 2 "gpc_reg_operand" "r,r")))
++	(match_operand:DF 3 "gpc_reg_operand" "ws,?wa"))
++   (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
++	(plus:P (match_dup 1)
++		(match_dup 2)))]
++  "TARGET_<VSbit>BIT && TARGET_UPDATE && VECTOR_MEM_VSX_P (DFmode)"
++  "stxsdux %x3,%0,%2"
++  [(set_attr "type" "vecstore")])
++
++;; We may need a variant of the pattern for use in the prologue
++;; that doesn't depend on TARGET_UPDATE.
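
The update patterns above fold the base-register bump of a reg+reg walk into
the memory access itself (the lx<VSm>ux / stx<VSm>ux templates).  The kind of
loop they target looks like this in plain C (illustrative only):

    /* A strided walk: on TARGET_UPDATE targets the p += stride increment
       can be absorbed into an indexed load-with-update.  */
    double strided_sum (const double *p, long n, long stride)
    {
      double s = 0.0;
      for (long i = 0; i < n; i++)
        {
          s += *p;
          p += stride;
        }
      return s;
    }
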
++
++
++;; VSX vector floating point arithmetic instructions
++(define_insn "*vsx_add<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (plus:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		    (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvadd<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_sub<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (minus:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		     (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvsub<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_mul<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (mult:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		    (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvmul<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_div<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (div:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++	 	   (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvdiv<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "vecfdiv")])
++
++(define_insn "*vsx_fre<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(unspec:VSX_F [(match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")]
++		      UNSPEC_FRES))]
++  "flag_finite_math_only && VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvre<VSs> %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_neg<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (neg:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvneg<VSs> %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_abs<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (abs:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvabs<VSs> %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_nabs<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (neg:VSX_F
++	 (abs:VSX_F
++	  (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>"))))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvnabs<VSs> %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_smax<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (smax:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		    (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvmax<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "veccmp")])
++
++(define_insn "*vsx_smin<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (smin:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		    (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvmin<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "veccmp")])
++
++(define_insn "*vsx_sqrt<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++        (sqrt:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvsqrt<VSs> %x0,%x1"
++  [(set_attr "type" "vecfdiv")])
++
++;; Fused vector multiply/add instructions
++(define_insn "*vsx_fmadd<mode>4"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(plus:VSX_F
++	 (mult:VSX_F
++	  (match_operand:VSX_F 1 "vsx_register_operand" "%<VSr>,<VSr>")
++	  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>,0"))
++	 (match_operand:VSX_F 3 "gpc_reg_operand" "0,<VSr>")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD"
++  "@
++   xvmadda<VSs> %x0,%x1,%x2
++   xvmaddm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fmsub<mode>4"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(minus:VSX_F
++	 (mult:VSX_F
++	  (match_operand:VSX_F 1 "vsx_register_operand" "%<VSr>,<VSr>")
++	  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>,0"))
++	 (match_operand:VSX_F 3 "vsx_register_operand" "0,<VSr>")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD"
++  "@
++   xvmsuba<VSs> %x0,%x1,%x2
++   xvmsubm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fnmadd<mode>4_1"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(neg:VSX_F
++	 (plus:VSX_F
++	  (mult:VSX_F
++	   (match_operand:VSX_F 1 "vsx_register_operand" "%<VSr>,<VSr>")
++	   (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>,0"))
++	  (match_operand:VSX_F 3 "vsx_register_operand" "0,<VSr>"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xvnmadda<VSs> %x0,%x1,%x2
++   xvnmaddm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fnmadd<mode>4_2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(minus:VSX_F
++	 (mult:VSX_F
++	  (neg:VSX_F
++	   (match_operand:VSX_F 1 "gpc_reg_operand" "%<VSr>,<VSr>"))
++	  (match_operand:VSX_F 2 "gpc_reg_operand" "<VSr>,0"))
++	 (match_operand:VSX_F 3 "vsx_register_operand" "0,<VSr>")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && !HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xvnmadda<VSs> %x0,%x1,%x2
++   xvnmaddm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fnmsub<mode>4_1"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(neg:VSX_F
++	 (minus:VSX_F
++	  (mult:VSX_F
++	   (match_operand:VSX_F 1 "vsx_register_operand" "%<VSr>,<VSr>")
++	   (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>,0"))
++	  (match_operand:VSX_F 3 "vsx_register_operand" "0,<VSr>"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xvnmsuba<VSs> %x0,%x1,%x2
++   xvnmsubm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fnmsub<mode>4_2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,<VSr>")
++	(minus:VSX_F
++	 (match_operand:VSX_F 3 "vsx_register_operand" "0,<VSr>")
++	 (mult:VSX_F
++	  (match_operand:VSX_F 1 "vsx_register_operand" "%<VSr>,<VSr>")
++	  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>,0"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && !HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xvnmsuba<VSs> %x0,%x1,%x2
++   xvnmsubm<VSs> %x0,%x1,%x3"
++  [(set_attr "type" "vecfloat")])
++
++;; Vector conditional expressions
++(define_insn "*vsx_eq<mode>"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(eq:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcmpeq<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "veccmp")])
++
++(define_insn "*vsx_gt<mode>"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(gt:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcmpgt<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "veccmp")])
++
++(define_insn "*vsx_ge<mode>"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(ge:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++		  (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcmpge<VSs> %x0,%x1,%x2"
++  [(set_attr "type" "veccmp")])
++
++(define_insn "vsx_vsel<mode>"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(if_then_else:VSX_F (ne (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")
++				(const_int 0))
++			    (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")
++			    (match_operand:VSX_F 3 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xxsel %x0,%x3,%x2,%x1"
++  [(set_attr "type" "vecperm")])
++
++;; Copy sign
++(define_insn "*vsx_copysign<mode>3"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(if_then_else:VSX_F
++	 (ge:VSX_F (match_operand:VSX_F 2 "vsx_register_operand" "<VSr>")
++		   (const_int 0))
++	 (abs:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>"))
++	 (neg:VSX_F (abs:VSX_F (match_dup 1)))))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcpsgn<VSs> %x0,%x2,%x1"
++  [(set_attr "type" "vecsimple")])
++
++(define_insn "*vsx_ftrunc<mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++  	(fix:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvr<VSs>piz %x0,%x1"
++  [(set_attr "type" "vecperm")])
++
++(define_insn "*vsx_float<VSi><mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(float:VSX_F (match_operand:<VSI> 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcvsx<VSc><VSs> %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_floatuns<VSi><mode>2"
++  [(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>")
++	(unsigned_float:VSX_F (match_operand:<VSI> 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcvux<VSc><VSs> %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fix_trunc<mode><VSi>2"
++  [(set (match_operand:<VSI> 0 "vsx_register_operand" "=<VSr>")
++	(fix:<VSI> (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcv<VSs>sx<VSc>s %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++(define_insn "*vsx_fixuns_trunc<mode><VSi>2"
++  [(set (match_operand:<VSI> 0 "vsx_register_operand" "=<VSr>")
++	(unsigned_fix:<VSI> (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>")))]
++  "VECTOR_UNIT_VSX_P (<MODE>mode)"
++  "xvcv<VSs>ux<VSc>s %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++
++;; VSX scalar double precision floating point operations
++(define_insn"*vsx_adddf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(plus:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		     (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsadddp %x0,%x1,%x2"
++  [(set_attr "type" "fp")
++   (set_attr "fp_type" "fp_addsub_d")])
++
++(define_insn"*vsx_subdf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(minus:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		      (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xssubdp %x0,%x1,%x2"
++  [(set_attr "type" "fp")
++   (set_attr "fp_type" "fp_addsub_d")])
++
++(define_insn"*vsx_muldf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(mult:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		     (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsmuldp %x0,%x1,%x2"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_mul_d")])
++
++(define_insn"*vsx_divdf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(div:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		    (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsdivdp %x0,%x1,%x2"
++  [(set_attr "type" "ddiv")])
++
++(define_insn "*vsx_fredf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(unspec:DF [(match_operand:DF 1 "vsx_register_operand" "ws")]
++		   UNSPEC_FRES))]
++  "flag_finite_math_only && VECTOR_UNIT_VSX_P (DFmode)"
++  "xsredp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_sqrtdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++        (sqrt:DF (match_operand:DF 1 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xssqrtdp %x0,%x1"
++  [(set_attr "type" "dsqrt")])
++
++(define_insn"*vsx_negdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(neg:DF (match_operand:DF 1 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsnegdp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn"vsx_absdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(abs:DF (match_operand:DF 1 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsabsdp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn"*vsx_nabsdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(neg:DF (abs:DF (match_operand:DF 1 "vsx_register_operand" "ws"))))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsnabsdp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_smaxdf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++        (smax:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		 (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsmaxdp %x0,%x1,%x2"
++  [(set_attr "type" "fp")])
++
++
++(define_insn "*vsx_smindf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++        (smin:DF (match_operand:DF 1 "vsx_register_operand" "ws")
++		 (match_operand:DF 2 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsmindp %x0,%x1,%x2"
++  [(set_attr "type" "fp")])
++
++;; Fused vector multiply/add instructions
++(define_insn "*vsx_fmadddf4"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(plus:DF (mult:DF (match_operand:DF 1 "vsx_register_operand" "%ws,ws")
++			  (match_operand:DF 2 "vsx_register_operand" "ws,0"))
++		 (match_operand:DF 3 "gpc_reg_operand" "0,ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD"
++  "@
++   xsmaddadp %x0,%x1,%x2
++   xsmaddmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++(define_insn "*vsx_fmsubdf4"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(minus:DF (mult:DF (match_operand:DF 1 "vsx_register_operand" "%ws,ws")
++			   (match_operand:DF 2 "vsx_register_operand" "ws,0"))
++		  (match_operand:DF 3 "vsx_register_operand" "0,ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD"
++  "@
++   xsmsubadp %x0,%x1,%x2
++   xsmsubmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++(define_insn "*vsx_fnmadddf4_1"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(neg:DF
++	 (plus:DF (mult:DF (match_operand:DF 1 "vsx_register_operand" "%ws,ws")
++			   (match_operand:DF 2 "vsx_register_operand" "ws,0"))
++		  (match_operand:DF 3 "vsx_register_operand" "0,ws"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xsnmaddadp %x0,%x1,%x2
++   xsnmaddmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++(define_insn "*vsx_fnmadddf4_2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(minus:DF (mult:DF (neg:DF
++			    (match_operand:DF 1 "gpc_reg_operand" "%ws,ws"))
++			   (match_operand:DF 2 "gpc_reg_operand" "ws,0"))
++		  (match_operand:DF 3 "vsx_register_operand" "0,ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && !HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xsnmaddadp %x0,%x1,%x2
++   xsnmaddmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++(define_insn "*vsx_fnmsubdf4_1"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(neg:DF
++	 (minus:DF
++	  (mult:DF (match_operand:DF 1 "vsx_register_operand" "%ws,ws")
++		   (match_operand:DF 2 "vsx_register_operand" "ws,0"))
++	  (match_operand:DF 3 "vsx_register_operand" "0,ws"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xsnmsubadp %x0,%x1,%x2
++   xsnmsubmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++(define_insn "*vsx_fnmsubdf4_2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws,ws")
++	(minus:DF
++	 (match_operand:DF 3 "vsx_register_operand" "0,ws")
++	 (mult:DF (match_operand:DF 1 "vsx_register_operand" "%ws,ws")
++		  (match_operand:DF 2 "vsx_register_operand" "ws,0"))))]
++  "VECTOR_UNIT_VSX_P (DFmode) && TARGET_FUSED_MADD
++   && !HONOR_SIGNED_ZEROS (DFmode)"
++  "@
++   xsnmsubadp %x0,%x1,%x2
++   xsnmsubmdp %x0,%x1,%x3"
++  [(set_attr "type" "dmul")
++   (set_attr "fp_type" "fp_maddsub_d")])
++
++;; For the conversions, limit the register class for the integer value to be
++;; the fprs.  For the unsigned tests, there isn't a generic double -> unsigned
++;; conversion in rs6000.md, so don't test VECTOR_UNIT_VSX_P; just test
++;; TARGET_VSX.
++
++(define_insn "*vsx_floatdidf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(float:DF (match_operand:DI 1 "vsx_register_operand" "!f#r")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xscvsxddp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_floatunsdidf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(unsigned_float:DF (match_operand:DI 1 "vsx_register_operand" "!f#r")))]
++  "TARGET_HARD_FLOAT && TARGET_VSX"
++  "xscvuxddp %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_fix_truncdfdi2"
++  [(set (match_operand:DI 0 "vsx_register_operand" "=!f#r")
++	(fix:DI (match_operand:DF 1 "vsx_register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xscvdpsxds %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_fixuns_truncdfdi2"
++  [(set (match_operand:DI 0 "vsx_register_operand" "=!f#r")
++	(unsigned_fix:DI (match_operand:DF 1 "vsx_register_operand" "ws")))]
++  "TARGET_HARD_FLOAT && TARGET_VSX"
++  "xscvdpuxds %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_btruncdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(unspec:DF [(match_operand:DF 1 "vsx_register_operand" "ws")]
++		   UNSPEC_FRIZ))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsrdpiz %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_floordf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(unspec:DF [(match_operand:DF 1 "vsx_register_operand" "ws")]
++		   UNSPEC_FRIM))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsrdpim %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_ceildf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(unspec:DF [(match_operand:DF 1 "vsx_register_operand" "ws")]
++		   UNSPEC_FRIP))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsrdpip %x0,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "vsx_copysigndf3"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(if_then_else:DF (ge:DF (match_operand:DF 2 "vsx_register_operand" "ws")
++				(const_int 0))
++			 (abs:DF (match_operand:DF 1 "vsx_register_operand" "ws"))
++			 (neg:DF (abs:DF (match_dup 1)))))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xscpsgndp %x0,%x2,%x1"
++  [(set_attr "type" "fp")])
++
++(define_insn "*vsx_ftruncdf2"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++  	(fix:DF (match_operand:DF 1 "register_operand" "ws")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "xsrdppiz %x0,%x1"
++  [(set_attr "type" "vecfloat")])
++
++
++;; Logical and permute operations
++(define_insn "*vsx_and<mode>3"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (and:VSX_L
++	 (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")
++	 (match_operand:VSX_L 2 "vsx_register_operand" "<VSr>,?wa")))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxland %x0,%x1,%x2"
++  [(set_attr "type" "vecsimple")])
++
++(define_insn "*vsx_ior<mode>3"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (ior:VSX_L (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")
++		   (match_operand:VSX_L 2 "vsx_register_operand" "<VSr>,?wa")))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxlor %x0,%x1,%x2"
++  [(set_attr "type" "vecsimple")])
++
++(define_insn "*vsx_xor<mode>3"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (xor:VSX_L
++	 (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")
++	 (match_operand:VSX_L 2 "vsx_register_operand" "<VSr>,?wa")))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxlxor %x0,%x1,%x2"
++  [(set_attr "type" "vecsimple")])
++
++(define_insn "*vsx_one_cmpl<mode>2"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (not:VSX_L
++	 (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxlnor %x0,%x1,%x1"
++  [(set_attr "type" "vecsimple")])
++  
++(define_insn "*vsx_nor<mode>3"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (not:VSX_L
++	 (ior:VSX_L
++	  (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")
++	  (match_operand:VSX_L 2 "vsx_register_operand" "<VSr>,?wa"))))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxlnor %x0,%x1,%x2"
++  [(set_attr "type" "vecsimple")])
++
++(define_insn "*vsx_andc<mode>3"
++  [(set (match_operand:VSX_L 0 "vsx_register_operand" "=<VSr>,?wa")
++        (and:VSX_L
++	 (not:VSX_L
++	  (match_operand:VSX_L 2 "vsx_register_operand" "<VSr>,?wa"))
++	 (match_operand:VSX_L 1 "vsx_register_operand" "<VSr>,?wa")))]
++  "VECTOR_MEM_VSX_P (<MODE>mode)"
++  "xxlandc %x0,%x1,%x2"
++  [(set_attr "type" "vecsimple")])
++
++
++;; Permute operations
++
++(define_insn "vsx_concat_v2df"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,?wa")
++	(unspec:V2DF
++	 [(match_operand:DF 1 "vsx_register_operand" "f,wa")
++	  (match_operand:DF 2 "vsx_register_operand" "f,wa")]
++	 UNSPEC_VSX_CONCAT_V2DF))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "xxpermdi %x0,%x1,%x2,0"
++  [(set_attr "type" "vecperm")])
++
++;; Set one element of a V2DF to a scalar double
++(define_insn "vsx_set_v2df"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd")
++	(vec_merge:V2DF
++	 (match_operand:V2DF 1 "vsx_register_operand" "wd")
++	 (vec_duplicate:V2DF (match_operand:DF 2 "vsx_register_operand" "ws"))
++	 (match_operand:QI 3 "u5bit_cint_operand" "i")))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++{
++  operands[3] = GEN_INT (INTVAL (operands[3]) & 1);
++  return \"xxpermdi %x0,%x1,%x2,%3\";
++}
++  [(set_attr "type" "vecperm")])
++
++;; Extract a DF element from V2DF
++(define_insn "vsx_extract_v2df"
++  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
++	(vec_select:DF (match_operand:V2DF 1 "vsx_register_operand" "wd")
++		       (parallel
++			[(match_operand:QI 2 "u5bit_cint_operand" "i")])))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++{
++  operands[3] = GEN_INT (INTVAL (operands[2]) & 1);
++  return \"xxpermdi %x0,%x1,%x1,%3\";
++}
++  [(set_attr "type" "vecperm")])
++
++;; General V2DF permute
++(define_insn "vsx_xxpermdi"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd")
++	(vec_concat:V2DF
++	 (vec_select:DF (match_operand:V2DF 1 "vsx_register_operand" "wd")
++			(parallel
++			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))
++	 (vec_select:DF (match_operand:V2DF 3 "vsx_register_operand" "wd")
++			(parallel
++			 [(match_operand:QI 4 "u5bit_cint_operand" "i")]))))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++{
++  operands[5] = GEN_INT (((INTVAL (operands[2]) & 1) << 1)
++			 | (INTVAL (operands[4]) & 1));
++  return \"xxpermdi %x0,%x1,%x3,%5\";
++}
++  [(set_attr "type" "vecperm")])
++
++;; V2DF splat
++(define_insn "vsx_splatv2df"
++  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd,wd")
++	(vec_duplicate:V2DF
++	 (match_operand:DF 1 "input_operand" "ws,Z")))]
++  "VECTOR_UNIT_VSX_P (V2DFmode)"
++  "@
++   xxpermdi %x0,%x1,%x1,0
++   lxvdsx %x0,%y1"
++  [(set_attr "type" "vecperm,vecload")])
++
++;; V4SF splat
++(define_insn "*vsx_xxspltw"
++  [(set (match_operand:V4SF 0 "vsx_register_operand" "=wf")
++	(vec_duplicate:V4SF
++	 (vec_select:SF (match_operand:V4SF 1 "vsx_register_operand" "wf")
++			(parallel
++			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
++  "VECTOR_UNIT_VSX_P (V4SFmode)"
++  "xxspltw %x0,%x1,%2"
++  [(set_attr "type" "vecperm")])
++
++;; V4SF interleave
++(define_insn "*vsx_xxmrghw"
++  [(set (match_operand:V4SF 0 "register_operand" "=v")
++        (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
++                                         (parallel [(const_int 0)
++                                                    (const_int 2)
++                                                    (const_int 1)
++                                                    (const_int 3)]))
++                        (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
++                                         (parallel [(const_int 2)
++                                                    (const_int 0)
++                                                    (const_int 3)
++                                                    (const_int 1)]))
++                      (const_int 5)))]
++  "VECTOR_UNIT_VSX_P (V4SFmode)"
++  "xxmrghw %x0,%x1,%x2"
++  [(set_attr "type" "vecperm")])
++
++(define_insn "*vsx_xxmrglw"
++  [(set (match_operand:V4SF 0 "register_operand" "=v")
++        (vec_merge:V4SF
++	 (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
++			  (parallel [(const_int 2)
++				     (const_int 0)
++				     (const_int 3)
++				     (const_int 1)]))
++	 (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
++			  (parallel [(const_int 0)
++				     (const_int 2)
++				     (const_int 1)
++				     (const_int 3)]))
++	 (const_int 5)))]
++  "VECTOR_UNIT_VSX_P (V4SFmode)"
++  "xxmrglw %x0,%x1,%x2"
++  [(set_attr "type" "vecperm")])
+--- gcc/config/rs6000/rs6000.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -1,6 +1,6 @@
+ /* Definitions of target machine for GNU compiler, for IBM RS/6000.
+    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+-   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
++   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+    Free Software Foundation, Inc.
+    Contributed by Richard Kenner (kenner at vlsi1.ultra.nyu.edu)
+ 
+@@ -72,14 +72,16 @@
+ #define ASM_CPU_POWER6_SPEC "-mpower4 -maltivec"
+ #endif
+ 
+-#ifdef HAVE_AS_VSX
++#ifdef HAVE_AS_POPCNTD
+ #define ASM_CPU_POWER7_SPEC "-mpower7"
+ #else
+ #define ASM_CPU_POWER7_SPEC "-mpower4 -maltivec"
+ #endif
+ 
+-/* Common ASM definitions used by ASM_SPEC among the various targets
+-   for handling -mcpu=xxx switches.  */
++/* Common ASM definitions used by ASM_SPEC among the various targets for
++   handling -mcpu=xxx switches.  There is a parallel list in driver-rs6000.c to
++   provide the default assembler options if the user uses -mcpu=native, so if
++   you make changes here, make them there as well.  */
+ #define ASM_CPU_SPEC \
+ "%{!mcpu*: \
+   %{mpower: %{!mpower2: -mpwr}} \
+@@ -88,6 +90,7 @@
+   %{!mpowerpc64*: %{mpowerpc*: -mppc}} \
+   %{mno-power: %{!mpowerpc*: -mcom}} \
+   %{!mno-power: %{!mpower*: %(asm_default)}}} \
++%{mcpu=native: %(asm_cpu_native)} \
+ %{mcpu=common: -mcom} \
+ %{mcpu=cell: -mcell} \
+ %{mcpu=power: -mpwr} \
+@@ -163,6 +166,7 @@
+ #define EXTRA_SPECS							\
+   { "cpp_default",		CPP_DEFAULT_SPEC },			\
+   { "asm_cpu",			ASM_CPU_SPEC },				\
++  { "asm_cpu_native",		ASM_CPU_NATIVE_SPEC },			\
+   { "asm_default",		ASM_DEFAULT_SPEC },			\
+   { "cc1_cpu",			CC1_CPU_SPEC },				\
+   { "asm_cpu_power5",		ASM_CPU_POWER5_SPEC },			\
+@@ -179,6 +183,10 @@ extern const char *host_detect_local_cpu
+ #define EXTRA_SPEC_FUNCTIONS \
+   { "local_cpu_detect", host_detect_local_cpu },
+ #define HAVE_LOCAL_CPU_DETECT
++#define ASM_CPU_NATIVE_SPEC "%:local_cpu_detect(asm)"
++
++#else
++#define ASM_CPU_NATIVE_SPEC "%(asm_default)"
+ #endif
+ 
+ #ifndef CC1_CPU_SPEC
+@@ -240,6 +248,14 @@ extern const char *host_detect_local_cpu
+ #define TARGET_DFP 0
+ #endif
+ 
++/* Force TARGET_POPCNTD to 0 if the target assembler does not support the
++   popcount word and doubleword instructions.  */
++
++#ifndef HAVE_AS_POPCNTD
++#undef  TARGET_POPCNTD
++#define TARGET_POPCNTD 0
++#endif
++
+ #ifndef TARGET_SECURE_PLT
+ #define TARGET_SECURE_PLT 0
+ #endif
+@@ -295,6 +311,7 @@ enum processor_type
+    PROCESSOR_POWER4,
+    PROCESSOR_POWER5,
+    PROCESSOR_POWER6,
++   PROCESSOR_POWER7,
+    PROCESSOR_CELL
+ };
+ 
+@@ -388,9 +405,13 @@ extern struct rs6000_cpu_select rs6000_s
+ extern const char *rs6000_debug_name;	/* Name for -mdebug-xxxx option */
+ extern int rs6000_debug_stack;		/* debug stack applications */
+ extern int rs6000_debug_arg;		/* debug argument handling */
++extern int rs6000_debug_reg;		/* debug register handling */
++extern int rs6000_debug_addr;		/* debug memory addressing */
+ 
+ #define	TARGET_DEBUG_STACK	rs6000_debug_stack
+ #define	TARGET_DEBUG_ARG	rs6000_debug_arg
++#define TARGET_DEBUG_REG	rs6000_debug_reg
++#define TARGET_DEBUG_ADDR	rs6000_debug_addr
+ 
+ extern const char *rs6000_traceback_name; /* Type of traceback table.  */
+ 
+@@ -401,13 +422,65 @@ extern int rs6000_ieeequad;
+ extern int rs6000_altivec_abi;
+ extern int rs6000_spe_abi;
+ extern int rs6000_spe;
+-extern int rs6000_isel;
+ extern int rs6000_float_gprs;
+ extern int rs6000_alignment_flags;
+ extern const char *rs6000_sched_insert_nops_str;
+ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
+ extern int rs6000_xilinx_fpu;
+ 
++/* Describe which vector unit to use for a given machine mode.  */
++enum rs6000_vector {
++  VECTOR_NONE,			/* Type is not a vector or not supported */
++  VECTOR_ALTIVEC,		/* Use altivec for vector processing */
++  VECTOR_VSX,			/* Use VSX for vector processing */
++  VECTOR_PAIRED,		/* Use paired floating point for vectors */
++  VECTOR_SPE,			/* Use SPE for vector processing */
++  VECTOR_OTHER			/* Some other vector unit */
++};
++
++extern enum rs6000_vector rs6000_vector_unit[];
++
++#define VECTOR_UNIT_NONE_P(MODE)			\
++  (rs6000_vector_unit[(MODE)] == VECTOR_NONE)
++
++#define VECTOR_UNIT_VSX_P(MODE)				\
++  (rs6000_vector_unit[(MODE)] == VECTOR_VSX)
++
++#define VECTOR_UNIT_ALTIVEC_P(MODE)			\
++  (rs6000_vector_unit[(MODE)] == VECTOR_ALTIVEC)
++
++#define VECTOR_UNIT_ALTIVEC_OR_VSX_P(MODE)		\
++  (rs6000_vector_unit[(MODE)] == VECTOR_ALTIVEC 	\
++   || rs6000_vector_unit[(MODE)] == VECTOR_VSX)
++
++/* Describe whether to use VSX loads or Altivec loads.  For now, just use the
++   same unit as the vector unit we are using, but we may want to migrate to
++   using VSX style loads even for types handled by altivec.  */
++extern enum rs6000_vector rs6000_vector_mem[];
++
++#define VECTOR_MEM_NONE_P(MODE)				\
++  (rs6000_vector_mem[(MODE)] == VECTOR_NONE)
++
++#define VECTOR_MEM_VSX_P(MODE)				\
++  (rs6000_vector_mem[(MODE)] == VECTOR_VSX)
++
++#define VECTOR_MEM_ALTIVEC_P(MODE)			\
++  (rs6000_vector_mem[(MODE)] == VECTOR_ALTIVEC)
++
++#define VECTOR_MEM_ALTIVEC_OR_VSX_P(MODE)		\
++  (rs6000_vector_mem[(MODE)] == VECTOR_ALTIVEC 	\
++   || rs6000_vector_mem[(MODE)] == VECTOR_VSX)
++
++/* Return the alignment of a given vector type, which is set based on the
++   vector unit in use.  VSX, for instance, can load 32- or 64-bit-aligned
++   words without problems, while Altivec requires 128-bit-aligned vectors.  */
++extern int rs6000_vector_align[];
++
++#define VECTOR_ALIGN(MODE)						\
++  ((rs6000_vector_align[(MODE)] != 0)					\
++   ? rs6000_vector_align[(MODE)]					\
++   : (int)GET_MODE_BITSIZE ((MODE)))
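
All of the VECTOR_UNIT_*/VECTOR_MEM_* tests above reduce to indexing small
per-mode arrays that are filled in once during option handling, which keeps
the hot macros branch-free.  A minimal stand-alone model of the scheme
(hypothetical mode list, illustrative only):

    /* Per-mode dispatch table in the style of rs6000_vector_unit[]:
       one entry per machine mode, initialized once at startup.  */
    enum unit { UNIT_NONE, UNIT_ALTIVEC, UNIT_VSX };
    enum mode { M_V4SF, M_V2DF, M_V4SI, M_NUM_MODES };

    static enum unit vector_unit[M_NUM_MODES];

    static void init_units (int have_vsx, int have_altivec)
    {
      vector_unit[M_V4SI] = have_altivec ? UNIT_ALTIVEC : UNIT_NONE;
      vector_unit[M_V4SF] = have_vsx ? UNIT_VSX
                            : have_altivec ? UNIT_ALTIVEC : UNIT_NONE;
      vector_unit[M_V2DF] = have_vsx ? UNIT_VSX : UNIT_NONE;
    }

    #define UNIT_VSX_P(m) (vector_unit[(m)] == UNIT_VSX)
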
++
+ /* Alignment options for fields in structures for sub-targets following
+    AIX-like ABI.
+    ALIGN_POWER word-aligns FP doubles (default AIX ABI).
+@@ -432,7 +505,7 @@ extern int rs6000_xilinx_fpu;
+ #define TARGET_SPE_ABI 0
+ #define TARGET_SPE 0
+ #define TARGET_E500 0
+-#define TARGET_ISEL rs6000_isel
++#define TARGET_ISEL64 (TARGET_ISEL && TARGET_POWERPC64)
+ #define TARGET_FPRS 1
+ #define TARGET_E500_SINGLE 0
+ #define TARGET_E500_DOUBLE 0
+@@ -530,6 +603,7 @@ extern int rs6000_xilinx_fpu;
+ #endif
+ #define UNITS_PER_FP_WORD 8
+ #define UNITS_PER_ALTIVEC_WORD 16
++#define UNITS_PER_VSX_WORD 16
+ #define UNITS_PER_SPE_WORD 8
+ #define UNITS_PER_PAIRED_WORD 8
+ 
+@@ -600,8 +674,9 @@ extern int rs6000_xilinx_fpu;
+ #define PARM_BOUNDARY (TARGET_32BIT ? 32 : 64)
+ 
+ /* Boundary (in *bits*) on which stack pointer should be aligned.  */
+-#define STACK_BOUNDARY \
+-  ((TARGET_32BIT && !TARGET_ALTIVEC && !TARGET_ALTIVEC_ABI) ? 64 : 128)
++#define STACK_BOUNDARY	\
++  ((TARGET_32BIT && !TARGET_ALTIVEC && !TARGET_ALTIVEC_ABI && !TARGET_VSX) \
++    ? 64 : 128)
+ 
+ /* Allocation boundary (in *bits*) for the code of a function.  */
+ #define FUNCTION_BOUNDARY 32
+@@ -613,10 +688,11 @@ extern int rs6000_xilinx_fpu;
+    local store.  TYPE is the data type, and ALIGN is the alignment
+    that the object would ordinarily have.  */
+ #define LOCAL_ALIGNMENT(TYPE, ALIGN)				\
+-  ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 :	\
++  (((TARGET_ALTIVEC || TARGET_VSX)				\
++    && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 :			\
+     (TARGET_E500_DOUBLE						\
+-     && TYPE_MODE (TYPE) == DFmode) ? 64 : \
+-    ((TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \
++     && TYPE_MODE (TYPE) == DFmode) ? 64 :			\
++    ((TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE		\
+      && SPE_VECTOR_MODE (TYPE_MODE (TYPE))) || (TARGET_PAIRED_FLOAT \
+         && TREE_CODE (TYPE) == VECTOR_TYPE \
+         && PAIRED_VECTOR_MODE (TYPE_MODE (TYPE)))) ? 64 : ALIGN)
+@@ -674,15 +750,17 @@ extern int rs6000_xilinx_fpu;
+ /* Define this macro to be the value 1 if unaligned accesses have a cost
+    many times greater than aligned accesses, for example if they are
+    emulated in a trap handler.  */
+-/* Altivec vector memory instructions simply ignore the low bits; SPE
+-   vector memory instructions trap on unaligned accesses.  */
++/* Altivec vector memory instructions simply ignore the low bits; SPE vector
++   memory instructions trap on unaligned accesses; VSX memory instructions
++   need only 4- or 8-byte alignment.  */
+ #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN)				\
+   (STRICT_ALIGNMENT							\
+    || (((MODE) == SFmode || (MODE) == DFmode || (MODE) == TFmode	\
+ 	|| (MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode	\
+ 	|| (MODE) == DImode)						\
+        && (ALIGN) < 32)							\
+-   || (VECTOR_MODE_P ((MODE)) && (ALIGN) < GET_MODE_BITSIZE ((MODE))))
++   || (VECTOR_MODE_P ((MODE)) && (((int)(ALIGN)) < VECTOR_ALIGN (MODE))))
++
+ 
+ /* Standard register usage.  */
+ 
+@@ -909,16 +987,60 @@ extern int rs6000_xilinx_fpu;
+ /* True if register is an AltiVec register.  */
+ #define ALTIVEC_REGNO_P(N) ((N) >= FIRST_ALTIVEC_REGNO && (N) <= LAST_ALTIVEC_REGNO)
+ 
++/* True if register is a VSX register.  */
++#define VSX_REGNO_P(N) (FP_REGNO_P (N) || ALTIVEC_REGNO_P (N))
++
++/* Alternate name for any vector register supporting floating point, no matter
++   which instruction set(s) are available.  */
++#define VFLOAT_REGNO_P(N) \
++  (ALTIVEC_REGNO_P (N) || (TARGET_VSX && FP_REGNO_P (N)))
++
++/* Alternate name for any vector register supporting integer, no matter which
++   instruction set(s) are available.  */
++#define VINT_REGNO_P(N) ALTIVEC_REGNO_P (N)
++
++/* Alternate name for any vector register supporting logical operations, no
++   matter which instruction set(s) are available.  */
++#define VLOGICAL_REGNO_P(N) VFLOAT_REGNO_P (N)
++
+ /* Return number of consecutive hard regs needed starting at reg REGNO
+    to hold something of mode MODE.  */
+ 
+-#define HARD_REGNO_NREGS(REGNO, MODE) rs6000_hard_regno_nregs ((REGNO), (MODE))
++#define HARD_REGNO_NREGS(REGNO, MODE) rs6000_hard_regno_nregs[(MODE)][(REGNO)]
+ 
+ #define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE)	\
+   ((TARGET_32BIT && TARGET_POWERPC64			\
+     && (GET_MODE_SIZE (MODE) > 4)  \
+     && INT_REGNO_P (REGNO)) ? 1 : 0)
+ 
++#define VSX_VECTOR_MODE(MODE)		\
++	 ((MODE) == V4SFmode		\
++	  || (MODE) == V2DFmode)
++
++#define VSX_VECTOR_MOVE_MODE(MODE)	\
++	 ((MODE) == V16QImode		\
++	  || (MODE) == V8HImode		\
++	  || (MODE) == V4SImode		\
++	  || (MODE) == V2DImode		\
++	  || (MODE) == V4SFmode		\
++	  || (MODE) == V2DFmode)
++
++#define VSX_SCALAR_MODE(MODE)		\
++	((MODE) == DFmode)
++
++#define VSX_MODE(MODE)			\
++	(VSX_VECTOR_MODE (MODE)		\
++	 || VSX_SCALAR_MODE (MODE))
++
++#define VSX_MOVE_MODE(MODE)		\
++	(VSX_VECTOR_MOVE_MODE (MODE)	\
++	 || VSX_SCALAR_MODE (MODE)	\
++	 || (MODE) == V16QImode		\
++	 || (MODE) == V8HImode		\
++	 || (MODE) == V4SImode		\
++	 || (MODE) == V2DImode		\
++	 || (MODE) == TImode)
++
+ #define ALTIVEC_VECTOR_MODE(MODE)	\
+ 	 ((MODE) == V16QImode		\
+ 	  || (MODE) == V8HImode		\
+@@ -934,10 +1056,12 @@ extern int rs6000_xilinx_fpu;
+ #define PAIRED_VECTOR_MODE(MODE)        \
+          ((MODE) == V2SFmode)            
+ 
+-#define UNITS_PER_SIMD_WORD(MODE)				     \
+-	(TARGET_ALTIVEC ? UNITS_PER_ALTIVEC_WORD		     \
+-	 : (TARGET_SPE ? UNITS_PER_SPE_WORD : (TARGET_PAIRED_FLOAT ? \
+-	 UNITS_PER_PAIRED_WORD : UNITS_PER_WORD)))
++#define UNITS_PER_SIMD_WORD(MODE)					\
++	(TARGET_VSX ? UNITS_PER_VSX_WORD				\
++	 : (TARGET_ALTIVEC ? UNITS_PER_ALTIVEC_WORD			\
++	 : (TARGET_SPE ? UNITS_PER_SPE_WORD				\
++	 : (TARGET_PAIRED_FLOAT ? UNITS_PER_PAIRED_WORD			\
++	 : UNITS_PER_WORD))))
+ 
+ /* Value is TRUE if hard register REGNO can hold a value of
+    machine-mode MODE.  */
+@@ -965,6 +1089,10 @@ extern int rs6000_xilinx_fpu;
+    ? ALTIVEC_VECTOR_MODE (MODE2)		\
+    : ALTIVEC_VECTOR_MODE (MODE2)		\
+    ? ALTIVEC_VECTOR_MODE (MODE1)		\
++   : VSX_VECTOR_MODE (MODE1)			\
++   ? VSX_VECTOR_MODE (MODE2)			\
++   : VSX_VECTOR_MODE (MODE2)			\
++   ? VSX_VECTOR_MODE (MODE1)			\
+    : 1)
+ 
+ /* Post-reload, we can't use any new AltiVec registers, as we already
+@@ -1056,9 +1184,10 @@ extern int rs6000_xilinx_fpu;
+    For any two classes, it is very desirable that there be another
+    class that represents their union.  */
+ 
+-/* The RS/6000 has three types of registers, fixed-point, floating-point,
+-   and condition registers, plus three special registers, MQ, CTR, and the
+-   link register.  AltiVec adds a vector register class.
++/* The RS/6000 has three types of registers, fixed-point, floating-point, and
++   condition registers, plus three special registers, MQ, CTR, and the link
++   register.  AltiVec adds a vector register class.  VSX registers overlap the
++   FPR registers and the Altivec registers.
+ 
+    However, r0 is special in that it cannot be used as a base register.
+    So make a class for registers valid as base registers.
+@@ -1073,6 +1202,7 @@ enum reg_class
+   GENERAL_REGS,
+   FLOAT_REGS,
+   ALTIVEC_REGS,
++  VSX_REGS,
+   VRSAVE_REGS,
+   VSCR_REGS,
+   SPE_ACC_REGS,
+@@ -1103,6 +1233,7 @@ enum reg_class
+   "GENERAL_REGS",							\
+   "FLOAT_REGS",								\
+   "ALTIVEC_REGS",							\
++  "VSX_REGS",								\
+   "VRSAVE_REGS",							\
+   "VSCR_REGS",								\
+   "SPE_ACC_REGS",                                                       \
+@@ -1132,6 +1263,7 @@ enum reg_class
+   { 0xffffffff, 0x00000000, 0x00000008, 0x00020000 }, /* GENERAL_REGS */     \
+   { 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FLOAT_REGS */       \
+   { 0x00000000, 0x00000000, 0xffffe000, 0x00001fff }, /* ALTIVEC_REGS */     \
++  { 0x00000000, 0xffffffff, 0xffffe000, 0x00001fff }, /* VSX_REGS */	     \
+   { 0x00000000, 0x00000000, 0x00000000, 0x00002000 }, /* VRSAVE_REGS */	     \
+   { 0x00000000, 0x00000000, 0x00000000, 0x00004000 }, /* VSCR_REGS */	     \
+   { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, /* SPE_ACC_REGS */     \
+@@ -1179,8 +1311,8 @@ enum reg_class
+   : (REGNO) == CR0_REGNO ? CR0_REGS		\
+   : CR_REGNO_P (REGNO) ? CR_REGS		\
+   : (REGNO) == MQ_REGNO ? MQ_REGS		\
+-  : (REGNO) == LR_REGNO ? LINK_REGS	\
+-  : (REGNO) == CTR_REGNO ? CTR_REGS	\
++  : (REGNO) == LR_REGNO ? LINK_REGS		\
++  : (REGNO) == CTR_REGNO ? CTR_REGS		\
+   : (REGNO) == ARG_POINTER_REGNUM ? BASE_REGS	\
+   : (REGNO) == XER_REGNO ? XER_REGS		\
+   : (REGNO) == VRSAVE_REGNO ? VRSAVE_REGS	\
+@@ -1190,10 +1322,18 @@ enum reg_class
+   : (REGNO) == FRAME_POINTER_REGNUM ? BASE_REGS	\
+   : NO_REGS)
+ 
++/* VSX register classes.  */
++extern enum reg_class rs6000_vector_reg_class[];
++extern enum reg_class rs6000_vsx_reg_class;
++
+ /* The class value for index registers, and the one for base regs.  */
+ #define INDEX_REG_CLASS GENERAL_REGS
+ #define BASE_REG_CLASS BASE_REGS
+ 
++/* Return whether a given register class can hold VSX objects.  */
++#define VSX_REG_CLASS_P(CLASS)			\
++  ((CLASS) == VSX_REGS || (CLASS) == FLOAT_REGS || (CLASS) == ALTIVEC_REGS)
++
+ /* Given an rtx X being reloaded into a reg required to be
+    in class CLASS, return the class of reg to actually use.
+    In general this is just CLASS; but on some machines
+@@ -1213,13 +1353,7 @@ enum reg_class
+  */
+ 
+ #define PREFERRED_RELOAD_CLASS(X,CLASS)			\
+-  ((CONSTANT_P (X)					\
+-    && reg_classes_intersect_p ((CLASS), FLOAT_REGS))	\
+-   ? NO_REGS 						\
+-   : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT 	\
+-      && (CLASS) == NON_SPECIAL_REGS)			\
+-   ? GENERAL_REGS					\
+-   : (CLASS))
++  rs6000_preferred_reload_class (X, CLASS)
+ 
+ /* Return the register class of a scratch register needed to copy IN into
+    or out of a register in CLASS in MODE.  If it can be done directly,
+@@ -1234,18 +1368,7 @@ enum reg_class
+    are available.*/
+ 
+ #define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE)			\
+- ((CLASS1) != (CLASS2) && (((CLASS1) == FLOAT_REGS			\
+-                            && (!TARGET_MFPGPR || !TARGET_POWERPC64	\
+-				|| ((MODE != DFmode)			\
+-				    && (MODE != DDmode)			\
+-				    && (MODE != DImode))))		\
+-			   || ((CLASS2) == FLOAT_REGS			\
+-                               && (!TARGET_MFPGPR || !TARGET_POWERPC64	\
+-				   || ((MODE != DFmode)			\
+-				       && (MODE != DDmode)		\
+-				       && (MODE != DImode))))		\
+-			   || (CLASS1) == ALTIVEC_REGS			\
+-			   || (CLASS2) == ALTIVEC_REGS))
++  rs6000_secondary_memory_needed (CLASS1, CLASS2, MODE)
+ 
+ /* For cpus that cannot load/store SDmode values from the 64-bit
+    FP registers without using a full 64-bit load/store, we need
+@@ -1257,32 +1380,15 @@ enum reg_class
+ /* Return the maximum number of consecutive registers
+    needed to represent mode MODE in a register of class CLASS.
+ 
+-   On RS/6000, this is the size of MODE in words,
+-   except in the FP regs, where a single reg is enough for two words.  */
+-#define CLASS_MAX_NREGS(CLASS, MODE)					\
+- (((CLASS) == FLOAT_REGS) 						\
+-  ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+-  : (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS			\
+-     && (MODE) == DFmode)				\
+-  ? 1                                                                   \
+-  : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
++   On RS/6000, this is the size of MODE in words, except in the FP regs, where
++   a single reg is enough for two words, unless we have VSX, where the FP
++   registers can hold 128 bits.  */
++#define CLASS_MAX_NREGS(CLASS, MODE) rs6000_class_max_nregs[(MODE)][(CLASS)]
+ 
+ /* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */
+ 
+ #define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS)			\
+-  (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO)				\
+-   ? ((GET_MODE_SIZE (FROM) < 8 || GET_MODE_SIZE (TO) < 8		\
+-       || TARGET_IEEEQUAD)						\
+-      && reg_classes_intersect_p (FLOAT_REGS, CLASS))			\
+-   : (((TARGET_E500_DOUBLE						\
+-	&& ((((TO) == DFmode) + ((FROM) == DFmode)) == 1		\
+-	    || (((TO) == TFmode) + ((FROM) == TFmode)) == 1		\
+-	    || (((TO) == DDmode) + ((FROM) == DDmode)) == 1		\
+-	    || (((TO) == TDmode) + ((FROM) == TDmode)) == 1		\
+-	    || (((TO) == DImode) + ((FROM) == DImode)) == 1))		\
+-       || (TARGET_SPE							\
+-	   && (SPE_VECTOR_MODE (FROM) + SPE_VECTOR_MODE (TO)) == 1))	\
+-      && reg_classes_intersect_p (GENERAL_REGS, CLASS)))
++  rs6000_cannot_change_mode_class (FROM, TO, CLASS)
+ 
+ /* Stack layout; function entry, exit and calling.  */
+ 
+@@ -1343,8 +1449,8 @@ extern enum rs6000_abi rs6000_current_ab
+ #define STARTING_FRAME_OFFSET						\
+   (FRAME_GROWS_DOWNWARD							\
+    ? 0									\
+-   : (RS6000_ALIGN (crtl->outgoing_args_size,		\
+-		    TARGET_ALTIVEC ? 16 : 8)				\
++   : (RS6000_ALIGN (crtl->outgoing_args_size,				\
++		    (TARGET_ALTIVEC || TARGET_VSX) ? 16 : 8)		\
+       + RS6000_SAVE_AREA))
+ 
+ /* Offset from the stack pointer register to an item dynamically
+@@ -1354,8 +1460,8 @@ extern enum rs6000_abi rs6000_current_ab
+    length of the outgoing arguments.  The default is correct for most
+    machines.  See `function.c' for details.  */
+ #define STACK_DYNAMIC_OFFSET(FUNDECL)					\
+-  (RS6000_ALIGN (crtl->outgoing_args_size,			\
+-		 TARGET_ALTIVEC ? 16 : 8)				\
++  (RS6000_ALIGN (crtl->outgoing_args_size,				\
++		 (TARGET_ALTIVEC || TARGET_VSX) ? 16 : 8)		\
+    + (STACK_POINTER_OFFSET))
+ 
+ /* If we generate an insn to push BYTES bytes,
+@@ -1605,7 +1711,7 @@ typedef struct rs6000_args
+ #define	EPILOGUE_USES(REGNO)					\
+   ((reload_completed && (REGNO) == LR_REGNO)			\
+    || (TARGET_ALTIVEC && (REGNO) == VRSAVE_REGNO)		\
+-   || (crtl->calls_eh_return				\
++   || (crtl->calls_eh_return					\
+        && TARGET_AIX						\
+        && (REGNO) == 2))
+ 
+@@ -2316,7 +2422,24 @@ extern char rs6000_reg_names[][8];	/* re
+   /* no additional names for: mq, lr, ctr, ap */		\
+   {"cr0",  68}, {"cr1",  69}, {"cr2",  70}, {"cr3",  71},	\
+   {"cr4",  72}, {"cr5",  73}, {"cr6",  74}, {"cr7",  75},	\
+-  {"cc",   68}, {"sp",    1}, {"toc",   2} }
++  {"cc",   68}, {"sp",    1}, {"toc",   2},			\
++  /* VSX registers overlaid on top of FR, Altivec registers */	\
++  {"vs0",  32}, {"vs1",  33}, {"vs2",  34}, {"vs3",  35},	\
++  {"vs4",  36}, {"vs5",  37}, {"vs6",  38}, {"vs7",  39},	\
++  {"vs8",  40}, {"vs9",  41}, {"vs10", 42}, {"vs11", 43},	\
++  {"vs12", 44}, {"vs13", 45}, {"vs14", 46}, {"vs15", 47},	\
++  {"vs16", 48}, {"vs17", 49}, {"vs18", 50}, {"vs19", 51},	\
++  {"vs20", 52}, {"vs21", 53}, {"vs22", 54}, {"vs23", 55},	\
++  {"vs24", 56}, {"vs25", 57}, {"vs26", 58}, {"vs27", 59},	\
++  {"vs28", 60}, {"vs29", 61}, {"vs30", 62}, {"vs31", 63},	\
++  {"vs32", 77}, {"vs33", 78}, {"vs34", 79}, {"vs35", 80},       \
++  {"vs36", 81}, {"vs37", 82}, {"vs38", 83}, {"vs39", 84},       \
++  {"vs40", 85}, {"vs41", 86}, {"vs42", 87}, {"vs43", 88},       \
++  {"vs44", 89}, {"vs45", 90}, {"vs46", 91}, {"vs47", 92},       \
++  {"vs48", 93}, {"vs49", 94}, {"vs50", 95}, {"vs51", 96},       \
++  {"vs52", 97}, {"vs53", 98}, {"vs54", 99}, {"vs55", 100},	\
++  {"vs56", 101},{"vs57", 102},{"vs58", 103},{"vs59", 104},      \
++  {"vs60", 105},{"vs61", 106},{"vs62", 107},{"vs63", 108} }
+ 
+ /* Text to write out after a CALL that may be replaced by glue code by
+    the loader.  This depends on the AIX version.  */
+@@ -2480,10 +2603,14 @@ enum rs6000_builtins
+   ALTIVEC_BUILTIN_VSEL_4SF,
+   ALTIVEC_BUILTIN_VSEL_8HI,
+   ALTIVEC_BUILTIN_VSEL_16QI,
++  ALTIVEC_BUILTIN_VSEL_2DF,		/* needed for VSX */
++  ALTIVEC_BUILTIN_VSEL_2DI,		/* needed for VSX */
+   ALTIVEC_BUILTIN_VPERM_4SI,
+   ALTIVEC_BUILTIN_VPERM_4SF,
+   ALTIVEC_BUILTIN_VPERM_8HI,
+   ALTIVEC_BUILTIN_VPERM_16QI,
++  ALTIVEC_BUILTIN_VPERM_2DF,		/* needed for VSX */
++  ALTIVEC_BUILTIN_VPERM_2DI,		/* needed for VSX */
+   ALTIVEC_BUILTIN_VPKUHUM,
+   ALTIVEC_BUILTIN_VPKUWUM,
+   ALTIVEC_BUILTIN_VPKPX,
+@@ -3110,6 +3237,163 @@ enum rs6000_builtins
+   RS6000_BUILTIN_RECIPF,
+   RS6000_BUILTIN_RSQRTF,
+ 
++  /* VSX builtins.  */
++  VSX_BUILTIN_LXSDUX,
++  VSX_BUILTIN_LXSDX,
++  VSX_BUILTIN_LXVD2UX,
++  VSX_BUILTIN_LXVD2X,
++  VSX_BUILTIN_LXVDSX,
++  VSX_BUILTIN_LXVW4UX,
++  VSX_BUILTIN_LXVW4X,
++  VSX_BUILTIN_STXSDUX,
++  VSX_BUILTIN_STXSDX,
++  VSX_BUILTIN_STXVD2UX,
++  VSX_BUILTIN_STXVD2X,
++  VSX_BUILTIN_STXVW4UX,
++  VSX_BUILTIN_STXVW4X,
++  VSX_BUILTIN_XSABSDP,
++  VSX_BUILTIN_XSADDDP,
++  VSX_BUILTIN_XSCMPODP,
++  VSX_BUILTIN_XSCMPUDP,
++  VSX_BUILTIN_XSCPSGNDP,
++  VSX_BUILTIN_XSCVDPSP,
++  VSX_BUILTIN_XSCVDPSXDS,
++  VSX_BUILTIN_XSCVDPSXWS,
++  VSX_BUILTIN_XSCVDPUXDS,
++  VSX_BUILTIN_XSCVDPUXWS,
++  VSX_BUILTIN_XSCVSPDP,
++  VSX_BUILTIN_XSCVSXDDP,
++  VSX_BUILTIN_XSCVUXDDP,
++  VSX_BUILTIN_XSDIVDP,
++  VSX_BUILTIN_XSMADDADP,
++  VSX_BUILTIN_XSMADDMDP,
++  VSX_BUILTIN_XSMAXDP,
++  VSX_BUILTIN_XSMINDP,
++  VSX_BUILTIN_XSMOVDP,
++  VSX_BUILTIN_XSMSUBADP,
++  VSX_BUILTIN_XSMSUBMDP,
++  VSX_BUILTIN_XSMULDP,
++  VSX_BUILTIN_XSNABSDP,
++  VSX_BUILTIN_XSNEGDP,
++  VSX_BUILTIN_XSNMADDADP,
++  VSX_BUILTIN_XSNMADDMDP,
++  VSX_BUILTIN_XSNMSUBADP,
++  VSX_BUILTIN_XSNMSUBMDP,
++  VSX_BUILTIN_XSRDPI,
++  VSX_BUILTIN_XSRDPIC,
++  VSX_BUILTIN_XSRDPIM,
++  VSX_BUILTIN_XSRDPIP,
++  VSX_BUILTIN_XSRDPIZ,
++  VSX_BUILTIN_XSREDP,
++  VSX_BUILTIN_XSRSQRTEDP,
++  VSX_BUILTIN_XSSQRTDP,
++  VSX_BUILTIN_XSSUBDP,
++  VSX_BUILTIN_XSTDIVDP,
++  VSX_BUILTIN_XSTSQRTDP,
++  VSX_BUILTIN_XVABSDP,
++  VSX_BUILTIN_XVABSSP,
++  VSX_BUILTIN_XVADDDP,
++  VSX_BUILTIN_XVADDSP,
++  VSX_BUILTIN_XVCMPEQDP,
++  VSX_BUILTIN_XVCMPEQSP,
++  VSX_BUILTIN_XVCMPGEDP,
++  VSX_BUILTIN_XVCMPGESP,
++  VSX_BUILTIN_XVCMPGTDP,
++  VSX_BUILTIN_XVCMPGTSP,
++  VSX_BUILTIN_XVCPSGNDP,
++  VSX_BUILTIN_XVCPSGNSP,
++  VSX_BUILTIN_XVCVDPSP,
++  VSX_BUILTIN_XVCVDPSXDS,
++  VSX_BUILTIN_XVCVDPSXWS,
++  VSX_BUILTIN_XVCVDPUXDS,
++  VSX_BUILTIN_XVCVDPUXWS,
++  VSX_BUILTIN_XVCVSPDP,
++  VSX_BUILTIN_XVCVSPSXDS,
++  VSX_BUILTIN_XVCVSPSXWS,
++  VSX_BUILTIN_XVCVSPUXDS,
++  VSX_BUILTIN_XVCVSPUXWS,
++  VSX_BUILTIN_XVCVSXDDP,
++  VSX_BUILTIN_XVCVSXDSP,
++  VSX_BUILTIN_XVCVSXWDP,
++  VSX_BUILTIN_XVCVSXWSP,
++  VSX_BUILTIN_XVCVUXDDP,
++  VSX_BUILTIN_XVCVUXDSP,
++  VSX_BUILTIN_XVCVUXWDP,
++  VSX_BUILTIN_XVCVUXWSP,
++  VSX_BUILTIN_XVDIVDP,
++  VSX_BUILTIN_XVDIVSP,
++  VSX_BUILTIN_XVMADDADP,
++  VSX_BUILTIN_XVMADDASP,
++  VSX_BUILTIN_XVMADDMDP,
++  VSX_BUILTIN_XVMADDMSP,
++  VSX_BUILTIN_XVMAXDP,
++  VSX_BUILTIN_XVMAXSP,
++  VSX_BUILTIN_XVMINDP,
++  VSX_BUILTIN_XVMINSP,
++  VSX_BUILTIN_XVMOVDP,
++  VSX_BUILTIN_XVMOVSP,
++  VSX_BUILTIN_XVMSUBADP,
++  VSX_BUILTIN_XVMSUBASP,
++  VSX_BUILTIN_XVMSUBMDP,
++  VSX_BUILTIN_XVMSUBMSP,
++  VSX_BUILTIN_XVMULDP,
++  VSX_BUILTIN_XVMULSP,
++  VSX_BUILTIN_XVNABSDP,
++  VSX_BUILTIN_XVNABSSP,
++  VSX_BUILTIN_XVNEGDP,
++  VSX_BUILTIN_XVNEGSP,
++  VSX_BUILTIN_XVNMADDADP,
++  VSX_BUILTIN_XVNMADDASP,
++  VSX_BUILTIN_XVNMADDMDP,
++  VSX_BUILTIN_XVNMADDMSP,
++  VSX_BUILTIN_XVNMSUBADP,
++  VSX_BUILTIN_XVNMSUBASP,
++  VSX_BUILTIN_XVNMSUBMDP,
++  VSX_BUILTIN_XVNMSUBMSP,
++  VSX_BUILTIN_XVRDPI,
++  VSX_BUILTIN_XVRDPIC,
++  VSX_BUILTIN_XVRDPIM,
++  VSX_BUILTIN_XVRDPIP,
++  VSX_BUILTIN_XVRDPIZ,
++  VSX_BUILTIN_XVREDP,
++  VSX_BUILTIN_XVRESP,
++  VSX_BUILTIN_XVRSPI,
++  VSX_BUILTIN_XVRSPIC,
++  VSX_BUILTIN_XVRSPIM,
++  VSX_BUILTIN_XVRSPIP,
++  VSX_BUILTIN_XVRSPIZ,
++  VSX_BUILTIN_XVRSQRTEDP,
++  VSX_BUILTIN_XVRSQRTESP,
++  VSX_BUILTIN_XVSQRTDP,
++  VSX_BUILTIN_XVSQRTSP,
++  VSX_BUILTIN_XVSUBDP,
++  VSX_BUILTIN_XVSUBSP,
++  VSX_BUILTIN_XVTDIVDP,
++  VSX_BUILTIN_XVTDIVSP,
++  VSX_BUILTIN_XVTSQRTDP,
++  VSX_BUILTIN_XVTSQRTSP,
++  VSX_BUILTIN_XXLAND,
++  VSX_BUILTIN_XXLANDC,
++  VSX_BUILTIN_XXLNOR,
++  VSX_BUILTIN_XXLOR,
++  VSX_BUILTIN_XXLXOR,
++  VSX_BUILTIN_XXMRGHD,
++  VSX_BUILTIN_XXMRGHW,
++  VSX_BUILTIN_XXMRGLD,
++  VSX_BUILTIN_XXMRGLW,
++  VSX_BUILTIN_XXPERMDI,
++  VSX_BUILTIN_XXSEL,
++  VSX_BUILTIN_XXSLDWI,
++  VSX_BUILTIN_XXSPLTD,
++  VSX_BUILTIN_XXSPLTW,
++  VSX_BUILTIN_XXSWAPD,
++
++  /* Combine VSX/Altivec builtins.  */
++  VECTOR_BUILTIN_FLOAT_V4SI_V4SF,
++  VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF,
++  VECTOR_BUILTIN_FIX_V4SF_V4SI,
++  VECTOR_BUILTIN_FIXUNS_V4SF_V4SI,
++
+   RS6000_BUILTIN_COUNT
+ };
+ 
+@@ -3123,6 +3407,8 @@ enum rs6000_builtin_type_index
+   RS6000_BTI_V16QI,
+   RS6000_BTI_V2SI,
+   RS6000_BTI_V2SF,
++  RS6000_BTI_V2DI,
++  RS6000_BTI_V2DF,
+   RS6000_BTI_V4HI,
+   RS6000_BTI_V4SI,
+   RS6000_BTI_V4SF,
+@@ -3146,7 +3432,10 @@ enum rs6000_builtin_type_index
+   RS6000_BTI_UINTHI,		 /* unsigned_intHI_type_node */
+   RS6000_BTI_INTSI,		 /* intSI_type_node */
+   RS6000_BTI_UINTSI,		 /* unsigned_intSI_type_node */
++  RS6000_BTI_INTDI,		 /* intDI_type_node */
++  RS6000_BTI_UINTDI,		 /* unsigned_intDI_type_node */
+   RS6000_BTI_float,	         /* float_type_node */
++  RS6000_BTI_double,	         /* double_type_node */
+   RS6000_BTI_void,	         /* void_type_node */
+   RS6000_BTI_MAX
+ };
+@@ -3157,6 +3446,8 @@ enum rs6000_builtin_type_index
+ #define opaque_p_V2SI_type_node       (rs6000_builtin_types[RS6000_BTI_opaque_p_V2SI])
+ #define opaque_V4SI_type_node         (rs6000_builtin_types[RS6000_BTI_opaque_V4SI])
+ #define V16QI_type_node               (rs6000_builtin_types[RS6000_BTI_V16QI])
++#define V2DI_type_node                (rs6000_builtin_types[RS6000_BTI_V2DI])
++#define V2DF_type_node                (rs6000_builtin_types[RS6000_BTI_V2DF])
+ #define V2SI_type_node                (rs6000_builtin_types[RS6000_BTI_V2SI])
+ #define V2SF_type_node                (rs6000_builtin_types[RS6000_BTI_V2SF])
+ #define V4HI_type_node                (rs6000_builtin_types[RS6000_BTI_V4HI])
+@@ -3183,7 +3474,10 @@ enum rs6000_builtin_type_index
+ #define uintHI_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_UINTHI])
+ #define intSI_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_INTSI])
+ #define uintSI_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_UINTSI])
++#define intDI_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_INTDI])
++#define uintDI_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_UINTDI])
+ #define float_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_float])
++#define double_type_internal_node	 (rs6000_builtin_types[RS6000_BTI_double])
+ #define void_type_internal_node		 (rs6000_builtin_types[RS6000_BTI_void])
+ 
+ extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX];
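+The rs6000.h hunks above repeatedly trade computed macros for table lookups
+indexed by machine mode -- VECTOR_ALIGN, HARD_REGNO_NREGS and CLASS_MAX_NREGS
+all become array references that are filled in at option-override time.  A
+minimal standalone C sketch of that table-with-fallback pattern; the mode
+names and values here are hypothetical, not GCC's:
+
+  #include <stdio.h>
+
+  enum mode { M_V4SF, M_V2DF, M_NUM };
+
+  static int vector_align[M_NUM];                /* 0 means "no override" */
+  static const int mode_bitsize[M_NUM] = { 128, 128 };
+
+  /* Mirrors the shape of VECTOR_ALIGN: use the table entry if one was
+     recorded, else fall back to the mode's natural bit size.  */
+  static int get_vector_align (enum mode m)
+  {
+    return vector_align[m] != 0 ? vector_align[m] : mode_bitsize[m];
+  }
+
+  int main (void)
+  {
+    vector_align[M_V4SF] = 32;   /* pretend an override to word alignment */
+    printf ("V4SF: %d bits\n", get_vector_align (M_V4SF));   /* 32 */
+    printf ("V2DF: %d bits\n", get_vector_align (M_V2DF));   /* 128 */
+    return 0;
+  }
+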
+--- gcc/config/rs6000/altivec.md	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/altivec.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -21,18 +21,7 @@
+ 
+ (define_constants
+   [(UNSPEC_VCMPBFP       50)
+-   (UNSPEC_VCMPEQUB      51)
+-   (UNSPEC_VCMPEQUH      52)
+-   (UNSPEC_VCMPEQUW      53)
+-   (UNSPEC_VCMPEQFP      54)
+-   (UNSPEC_VCMPGEFP      55)
+-   (UNSPEC_VCMPGTUB      56)
+-   (UNSPEC_VCMPGTSB      57)
+-   (UNSPEC_VCMPGTUH      58)
+-   (UNSPEC_VCMPGTSH      59)
+-   (UNSPEC_VCMPGTUW      60)
+-   (UNSPEC_VCMPGTSW      61)
+-   (UNSPEC_VCMPGTFP      62)
++   ;; 51-62 deleted
+    (UNSPEC_VMSUMU        65)
+    (UNSPEC_VMSUMM        66)
+    (UNSPEC_VMSUMSHM      68)
+@@ -87,10 +76,7 @@ (define_constants
+    (UNSPEC_VEXPTEFP     156)
+    (UNSPEC_VRSQRTEFP    157)
+    (UNSPEC_VREFP        158)
+-   (UNSPEC_VSEL4SI      159)
+-   (UNSPEC_VSEL4SF      160)
+-   (UNSPEC_VSEL8HI      161)
+-   (UNSPEC_VSEL16QI     162)
++   ;; 159-162 deleted
+    (UNSPEC_VLSDOI       163)
+    (UNSPEC_VUPKHSB      167)
+    (UNSPEC_VUPKHPX      168)
+@@ -125,11 +111,11 @@ (define_constants
+    (UNSPEC_INTERHI_V4SI 228)
+    (UNSPEC_INTERHI_V8HI 229)
+    (UNSPEC_INTERHI_V16QI 230)
+-   (UNSPEC_INTERHI_V4SF 231)
++   ;; 231 deleted
+    (UNSPEC_INTERLO_V4SI 232)
+    (UNSPEC_INTERLO_V8HI 233)
+    (UNSPEC_INTERLO_V16QI 234)
+-   (UNSPEC_INTERLO_V4SF 235)
++   ;; delete 235
+    (UNSPEC_LVLX         236)
+    (UNSPEC_LVLXL        237)
+    (UNSPEC_LVRX         238)
+@@ -176,39 +162,17 @@ (define_mode_iterator VIshort [V8HI V16Q
+ (define_mode_iterator VF [V4SF])
+ ;; Vec modes, pity mode iterators are not composable
+ (define_mode_iterator V [V4SI V8HI V16QI V4SF])
++;; Vec modes for move/logical/permute ops, include vector types for move not
++;; otherwise handled by altivec (v2df, v2di, ti)
++(define_mode_iterator VM [V4SI V8HI V16QI V4SF V2DF V2DI TI])
+ 
+ (define_mode_attr VI_char [(V4SI "w") (V8HI "h") (V16QI "b")])
+ 
+-;; Generic LVX load instruction.
+-(define_insn "altivec_lvx_<mode>"
+-  [(set (match_operand:V 0 "altivec_register_operand" "=v")
+-	(match_operand:V 1 "memory_operand" "Z"))]
+-  "TARGET_ALTIVEC"
+-  "lvx %0,%y1"
+-  [(set_attr "type" "vecload")])
+-
+-;; Generic STVX store instruction.
+-(define_insn "altivec_stvx_<mode>"
+-  [(set (match_operand:V 0 "memory_operand" "=Z")
+-	(match_operand:V 1 "altivec_register_operand" "v"))]
+-  "TARGET_ALTIVEC"
+-  "stvx %1,%y0"
+-  [(set_attr "type" "vecstore")])
+-
+ ;; Vector move instructions.
+-(define_expand "mov<mode>"
+-  [(set (match_operand:V 0 "nonimmediate_operand" "")
+-	(match_operand:V 1 "any_operand" ""))]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_emit_move (operands[0], operands[1], <MODE>mode);
+-  DONE;
+-})
+-
+-(define_insn "*mov<mode>_internal"
+-  [(set (match_operand:V 0 "nonimmediate_operand" "=Z,v,v,o,r,r,v")
+-	(match_operand:V 1 "input_operand" "v,Z,v,r,o,r,W"))]
+-  "TARGET_ALTIVEC 
++(define_insn "*altivec_mov<mode>"
++  [(set (match_operand:V 0 "nonimmediate_operand" "=Z,v,v,*o,*r,*r,v,v")
++	(match_operand:V 1 "input_operand" "v,Z,v,r,o,r,j,W"))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
+    && (register_operand (operands[0], <MODE>mode) 
+        || register_operand (operands[1], <MODE>mode))"
+ {
+@@ -220,52 +184,17 @@ (define_insn "*mov<mode>_internal"
+     case 3: return "#";
+     case 4: return "#";
+     case 5: return "#";
+-    case 6: return output_vec_const_move (operands);
++    case 6: return "vxor %0,%0,%0";
++    case 7: return output_vec_const_move (operands);
+     default: gcc_unreachable ();
+     }
+ }
+-  [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
+-
+-(define_split
+-  [(set (match_operand:V4SI 0 "nonimmediate_operand" "")
+-        (match_operand:V4SI 1 "input_operand" ""))]
+-  "TARGET_ALTIVEC && reload_completed
+-   && gpr_or_gpr_p (operands[0], operands[1])"
+-  [(pc)]
+-{
+-  rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+-})
+-
+-(define_split
+-  [(set (match_operand:V8HI 0 "nonimmediate_operand" "")
+-        (match_operand:V8HI 1 "input_operand" ""))]
+-  "TARGET_ALTIVEC && reload_completed
+-   && gpr_or_gpr_p (operands[0], operands[1])"
+-  [(pc)]
+-{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
++  [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,vecsimple,*")])
+ 
+ (define_split
+-  [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
+-        (match_operand:V16QI 1 "input_operand" ""))]
+-  "TARGET_ALTIVEC && reload_completed
+-   && gpr_or_gpr_p (operands[0], operands[1])"
+-  [(pc)]
+-{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+-
+-(define_split
+-  [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
+-        (match_operand:V4SF 1 "input_operand" ""))]
+-  "TARGET_ALTIVEC && reload_completed
+-   && gpr_or_gpr_p (operands[0], operands[1])"
+-  [(pc)]
+-{
+-  rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+-})
+-
+-(define_split
+-  [(set (match_operand:V 0 "altivec_register_operand" "")
+-	(match_operand:V 1 "easy_vector_constant_add_self" ""))]
+-  "TARGET_ALTIVEC && reload_completed"
++  [(set (match_operand:VM 0 "altivec_register_operand" "")
++	(match_operand:VM 1 "easy_vector_constant_add_self" ""))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode) && reload_completed"
+   [(set (match_dup 0) (match_dup 3))
+    (set (match_dup 0) (match_dup 4))]
+ {
+@@ -346,11 +275,11 @@ (define_insn "add<mode>3"
+   "vaddu<VI_char>m %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "addv4sf3"
++(define_insn "*altivec_addv4sf3"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+         (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ 	 	   (match_operand:V4SF 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vaddfp %0,%1,%2"
+   [(set_attr "type" "vecfloat")])
+ 
+@@ -392,11 +321,11 @@ (define_insn "sub<mode>3"
+   "vsubu<VI_char>m %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "subv4sf3"
++(define_insn "*altivec_subv4sf3"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+         (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+                     (match_operand:V4SF 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vsubfp %0,%1,%2"
+   [(set_attr "type" "vecfloat")])
+ 
+@@ -457,131 +386,81 @@ (define_insn "altivec_vcmpbfp"
+   "vcmpbfp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpequb"
+-  [(set (match_operand:V16QI 0 "register_operand" "=v")
+-        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+-                       (match_operand:V16QI 2 "register_operand" "v")] 
+-                       UNSPEC_VCMPEQUB))]
++(define_insn "*altivec_eq<mode>"
++  [(set (match_operand:VI 0 "altivec_register_operand" "=v")
++	(eq:VI (match_operand:VI 1 "altivec_register_operand" "v")
++	       (match_operand:VI 2 "altivec_register_operand" "v")))]
+   "TARGET_ALTIVEC"
+-  "vcmpequb %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
++  "vcmpequ<VI_char> %0,%1,%2"
++  [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpequh"
+-  [(set (match_operand:V8HI 0 "register_operand" "=v")
+-        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+-                      (match_operand:V8HI 2 "register_operand" "v")] 
+-                      UNSPEC_VCMPEQUH))]
++(define_insn "*altivec_gt<mode>"
++  [(set (match_operand:VI 0 "altivec_register_operand" "=v")
++	(gt:VI (match_operand:VI 1 "altivec_register_operand" "v")
++	       (match_operand:VI 2 "altivec_register_operand" "v")))]
+   "TARGET_ALTIVEC"
+-  "vcmpequh %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
++  "vcmpgts<VI_char> %0,%1,%2"
++  [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpequw"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+-                      (match_operand:V4SI 2 "register_operand" "v")] 
+-	              UNSPEC_VCMPEQUW))]
++(define_insn "*altivec_gtu<mode>"
++  [(set (match_operand:VI 0 "altivec_register_operand" "=v")
++	(gtu:VI (match_operand:VI 1 "altivec_register_operand" "v")
++		(match_operand:VI 2 "altivec_register_operand" "v")))]
+   "TARGET_ALTIVEC"
+-  "vcmpequw %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
++  "vcmpgtu<VI_char> %0,%1,%2"
++  [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpeqfp"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+-                      (match_operand:V4SF 2 "register_operand" "v")] 
+-	              UNSPEC_VCMPEQFP))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_eqv4sf"
++  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
++	(eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
++		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vcmpeqfp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpgefp"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+-                      (match_operand:V4SF 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGEFP))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgefp %0,%1,%2"
++(define_insn "*altivec_gtv4sf"
++  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
++	(gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
++		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
++  "vcmpgtfp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
+-(define_insn "altivec_vcmpgtub"
+-  [(set (match_operand:V16QI 0 "register_operand" "=v")
+-        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+-                       (match_operand:V16QI 2 "register_operand" "v")] 
+-		      UNSPEC_VCMPGTUB))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtub %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtsb"
+-  [(set (match_operand:V16QI 0 "register_operand" "=v")
+-        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+-                       (match_operand:V16QI 2 "register_operand" "v")] 
+-		      UNSPEC_VCMPGTSB))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtsb %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtuh"
+-  [(set (match_operand:V8HI 0 "register_operand" "=v")
+-        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+-                      (match_operand:V8HI 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGTUH))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtuh %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtsh"
+-  [(set (match_operand:V8HI 0 "register_operand" "=v")
+-        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+-                      (match_operand:V8HI 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGTSH))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtsh %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtuw"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+-                      (match_operand:V4SI 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGTUW))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtuw %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtsw"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+-                      (match_operand:V4SI 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGTSW))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtsw %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "altivec_vcmpgtfp"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+-                      (match_operand:V4SF 2 "register_operand" "v")] 
+-		     UNSPEC_VCMPGTFP))]
+-  "TARGET_ALTIVEC"
+-  "vcmpgtfp %0,%1,%2"
++(define_insn "*altivec_gev4sf"
++  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
++	(ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
++		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
++  "vcmpgefp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
++(define_insn "altivec_vsel<mode>"
++  [(set (match_operand:VM 0 "altivec_register_operand" "=v")
++	(if_then_else:VM (ne (match_operand:VM 1 "altivec_register_operand" "v")
++			     (const_int 0))
++			 (match_operand:VM 2 "altivec_register_operand" "v")
++			 (match_operand:VM 3 "altivec_register_operand" "v")))]
++  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
++  "vsel %0,%3,%2,%1"
++  [(set_attr "type" "vecperm")])
++
+ ;; Fused multiply add
+ (define_insn "altivec_vmaddfp"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+ 	(plus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ 			      (match_operand:V4SF 2 "register_operand" "v"))
+ 	  	   (match_operand:V4SF 3 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vmaddfp %0,%1,%2,%3"
+   [(set_attr "type" "vecfloat")])
+ 
+ ;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
+ 
+-(define_expand "mulv4sf3"
++(define_expand "altivec_mulv4sf3"
+   [(use (match_operand:V4SF 0 "register_operand" ""))
+    (use (match_operand:V4SF 1 "register_operand" ""))
+    (use (match_operand:V4SF 2 "register_operand" ""))]
+-  "TARGET_ALTIVEC && TARGET_FUSED_MADD"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode) && TARGET_FUSED_MADD"
+   "
+ {
+   rtx neg0;
+@@ -684,7 +563,7 @@ (define_insn "altivec_vnmsubfp"
+ 	(neg:V4SF (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ 			       (match_operand:V4SF 2 "register_operand" "v"))
+ 	  	    (match_operand:V4SF 3 "register_operand" "v"))))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vnmsubfp %0,%1,%2,%3"
+   [(set_attr "type" "vecfloat")])
+ 
+@@ -758,11 +637,11 @@ (define_insn "smax<mode>3"
+   "vmaxs<VI_char> %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "smaxv4sf3"
++(define_insn "*altivec_smaxv4sf3"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+         (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+                    (match_operand:V4SF 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vmaxfp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
+@@ -782,11 +661,11 @@ (define_insn "smin<mode>3"
+   "vmins<VI_char> %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "sminv4sf3"
++(define_insn "*altivec_sminv4sf3"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+         (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
+                    (match_operand:V4SF 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vminfp %0,%1,%2"
+   [(set_attr "type" "veccmp")])
+ 
+@@ -905,7 +784,7 @@ (define_insn "altivec_vmrghw"
+   "vmrghw %0,%1,%2"
+   [(set_attr "type" "vecperm")])
+ 
+-(define_insn "altivec_vmrghsf"
++(define_insn "*altivec_vmrghsf"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+         (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
+                                          (parallel [(const_int 0)
+@@ -918,7 +797,7 @@ (define_insn "altivec_vmrghsf"
+                                                     (const_int 3)
+                                                     (const_int 1)]))
+                       (const_int 5)))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vmrghw %0,%1,%2"
+   [(set_attr "type" "vecperm")])
+ 
+@@ -990,35 +869,37 @@ (define_insn "altivec_vmrglh"
+ 
+ (define_insn "altivec_vmrglw"
+   [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+-					 (parallel [(const_int 2)
+-					 	    (const_int 0)
+-						    (const_int 3)
+-						    (const_int 1)]))
+-                        (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+-					 (parallel [(const_int 0)
+-					 	    (const_int 2)
+-						    (const_int 1)
+-						    (const_int 3)]))
+-		      (const_int 5)))]
++        (vec_merge:V4SI
++	 (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
++			  (parallel [(const_int 2)
++				     (const_int 0)
++				     (const_int 3)
++				     (const_int 1)]))
++	 (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
++			  (parallel [(const_int 0)
++				     (const_int 2)
++				     (const_int 1)
++				     (const_int 3)]))
++	 (const_int 5)))]
+   "TARGET_ALTIVEC"
+   "vmrglw %0,%1,%2"
+   [(set_attr "type" "vecperm")])
+ 
+-(define_insn "altivec_vmrglsf"
++(define_insn "*altivec_vmrglsf"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+-        (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
+-                                         (parallel [(const_int 2)
+-                                                    (const_int 0)
+-                                                    (const_int 3)
+-                                                    (const_int 1)]))
+-                        (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
+-                                         (parallel [(const_int 0)
+-                                                    (const_int 2)
+-                                                    (const_int 1)
+-                                                    (const_int 3)]))
+-                      (const_int 5)))]
+-  "TARGET_ALTIVEC"
++        (vec_merge:V4SF
++	 (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
++			  (parallel [(const_int 2)
++				     (const_int 0)
++				     (const_int 3)
++				     (const_int 1)]))
++	 (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
++			  (parallel [(const_int 0)
++				     (const_int 2)
++				     (const_int 1)
++				     (const_int 3)]))
++	 (const_int 5)))]
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vmrglw %0,%1,%2"
+   [(set_attr "type" "vecperm")])
+ 
+@@ -1095,68 +976,53 @@ (define_insn "altivec_vmulosh"
+   [(set_attr "type" "veccomplex")])
+ 
+ 
+-;; logical ops
++;; logical ops.  Have the logical ops follow the memory ops in
++;; terms of whether to prefer VSX or Altivec
+ 
+-(define_insn "and<mode>3"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (and:VI (match_operand:VI 1 "register_operand" "v")
+-                (match_operand:VI 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_and<mode>3"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (and:VM (match_operand:VM 1 "register_operand" "v")
++		(match_operand:VM 2 "register_operand" "v")))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vand %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "ior<mode>3"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (ior:VI (match_operand:VI 1 "register_operand" "v")
+-                (match_operand:VI 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_ior<mode>3"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (ior:VM (match_operand:VM 1 "register_operand" "v")
++		(match_operand:VM 2 "register_operand" "v")))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vor %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "xor<mode>3"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (xor:VI (match_operand:VI 1 "register_operand" "v")
+-                (match_operand:VI 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_xor<mode>3"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (xor:VM (match_operand:VM 1 "register_operand" "v")
++		(match_operand:VM 2 "register_operand" "v")))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vxor %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "xorv4sf3"
+-  [(set (match_operand:V4SF 0 "register_operand" "=v")
+-        (xor:V4SF (match_operand:V4SF 1 "register_operand" "v")
+-                  (match_operand:V4SF 2 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
+-  "vxor %0,%1,%2" 
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "one_cmpl<mode>2"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (not:VI (match_operand:VI 1 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_one_cmpl<mode>2"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (not:VM (match_operand:VM 1 "register_operand" "v")))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vnor %0,%1,%1"
+   [(set_attr "type" "vecsimple")])
+   
+-(define_insn "altivec_nor<mode>3"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (not:VI (ior:VI (match_operand:VI 1 "register_operand" "v")
+-                        (match_operand:VI 2 "register_operand" "v"))))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_nor<mode>3"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (not:VM (ior:VM (match_operand:VM 1 "register_operand" "v")
++			(match_operand:VM 2 "register_operand" "v"))))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vnor %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+-(define_insn "andc<mode>3"
+-  [(set (match_operand:VI 0 "register_operand" "=v")
+-        (and:VI (not:VI (match_operand:VI 2 "register_operand" "v"))
+-                (match_operand:VI 1 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
+-  "vandc %0,%1,%2"
+-  [(set_attr "type" "vecsimple")])
+-
+-(define_insn "*andc3_v4sf"
+-  [(set (match_operand:V4SF 0 "register_operand" "=v")
+-        (and:V4SF (not:V4SF (match_operand:V4SF 2 "register_operand" "v"))
+-                  (match_operand:V4SF 1 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++(define_insn "*altivec_andc<mode>3"
++  [(set (match_operand:VM 0 "register_operand" "=v")
++        (and:VM (not:VM (match_operand:VM 2 "register_operand" "v"))
++		(match_operand:VM 1 "register_operand" "v")))]
++  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
+   "vandc %0,%1,%2"
+   [(set_attr "type" "vecsimple")])
+ 
+@@ -1392,7 +1258,7 @@ (define_insn "*altivec_vspltsf"
+ 	 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
+ 			(parallel
+ 			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vspltw %0,%1,%2"
+   [(set_attr "type" "vecperm")])
+ 
+@@ -1404,19 +1270,19 @@ (define_insn "altivec_vspltis<VI_char>"
+   "vspltis<VI_char> %0,%1"
+   [(set_attr "type" "vecperm")])
+ 
+-(define_insn "ftruncv4sf2"
++(define_insn "*altivec_ftruncv4sf2"
+   [(set (match_operand:V4SF 0 "register_operand" "=v")
+   	(fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
+-  "TARGET_ALTIVEC"
++  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
+   "vrfiz %0,%1"
+   [(set_attr "type" "vecfloat")])
+ 
+ (define_insn "altivec_vperm_<mode>"
+-  [(set (match_operand:V 0 "register_operand" "=v")
+-	(unspec:V [(match_operand:V 1 "register_operand" "v")
+-		   (match_operand:V 2 "register_operand" "v")
+-		   (match_operand:V16QI 3 "register_operand" "v")]
+-		  UNSPEC_VPERM))]
++  [(set (match_operand:VM 0 "register_operand" "=v")
++	(unspec:VM [(match_operand:VM 1 "register_operand" "v")
++		    (match_operand:VM 2 "register_operand" "v")
++		    (match_operand:V16QI 3 "register_operand" "v")]
++		   UNSPEC_VPERM))]
+   "TARGET_ALTIVEC"
+   "vperm %0,%1,%2,%3"
+   [(set_attr "type" "vecperm")])
+@@ -1515,180 +1381,6 @@ (define_insn "altivec_vrefp"
+   "vrefp %0,%1"
+   [(set_attr "type" "vecfloat")])
+ 
+-(define_expand "vcondv4si"
+-        [(set (match_operand:V4SI 0 "register_operand" "=v")
+-              (if_then_else:V4SI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V4SI 4 "register_operand" "v")
+-                   (match_operand:V4SI 5 "register_operand" "v")])
+-               (match_operand:V4SI 1 "register_operand" "v")
+-               (match_operand:V4SI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vconduv4si"
+-        [(set (match_operand:V4SI 0 "register_operand" "=v")
+-              (if_then_else:V4SI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V4SI 4 "register_operand" "v")
+-                   (match_operand:V4SI 5 "register_operand" "v")])
+-               (match_operand:V4SI 1 "register_operand" "v")
+-               (match_operand:V4SI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vcondv4sf"
+-        [(set (match_operand:V4SF 0 "register_operand" "=v")
+-              (if_then_else:V4SF
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V4SF 4 "register_operand" "v")
+-                   (match_operand:V4SF 5 "register_operand" "v")])
+-               (match_operand:V4SF 1 "register_operand" "v")
+-               (match_operand:V4SF 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vcondv8hi"
+-        [(set (match_operand:V8HI 0 "register_operand" "=v")
+-              (if_then_else:V8HI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V8HI 4 "register_operand" "v")
+-                   (match_operand:V8HI 5 "register_operand" "v")])
+-               (match_operand:V8HI 1 "register_operand" "v")
+-               (match_operand:V8HI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vconduv8hi"
+-        [(set (match_operand:V8HI 0 "register_operand" "=v")
+-              (if_then_else:V8HI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V8HI 4 "register_operand" "v")
+-                   (match_operand:V8HI 5 "register_operand" "v")])
+-               (match_operand:V8HI 1 "register_operand" "v")
+-               (match_operand:V8HI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vcondv16qi"
+-        [(set (match_operand:V16QI 0 "register_operand" "=v")
+-              (if_then_else:V16QI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V16QI 4 "register_operand" "v")
+-                   (match_operand:V16QI 5 "register_operand" "v")])
+-               (match_operand:V16QI 1 "register_operand" "v")
+-               (match_operand:V16QI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-(define_expand "vconduv16qi"
+-        [(set (match_operand:V16QI 0 "register_operand" "=v")
+-              (if_then_else:V16QI
+-                (match_operator 3 "comparison_operator"
+-                  [(match_operand:V16QI 4 "register_operand" "v")
+-                   (match_operand:V16QI 5 "register_operand" "v")])
+-               (match_operand:V16QI 1 "register_operand" "v")
+-               (match_operand:V16QI 2 "register_operand" "v")))]
+-	"TARGET_ALTIVEC"
+-	"
+-{
+-	if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+-					  operands[3], operands[4], operands[5]))
+-	DONE;
+-	else
+-	FAIL;
+-}
+-	")
+-
+-
+-(define_insn "altivec_vsel_v4si"
+-  [(set (match_operand:V4SI 0 "register_operand" "=v")
+-        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+-                      (match_operand:V4SI 2 "register_operand" "v")
+-                      (match_operand:V4SI 3 "register_operand" "v")] 
+-		     UNSPEC_VSEL4SI))]
+-  "TARGET_ALTIVEC"
+-  "vsel %0,%1,%2,%3"
+-  [(set_attr "type" "vecperm")])
+-
+-(define_insn "altivec_vsel_v4sf"
+-  [(set (match_operand:V4SF 0 "register_operand" "=v")
+-        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+-                      (match_operand:V4SF 2 "register_operand" "v")
+-                      (match_operand:V4SI 3 "register_operand" "v")] 
+-	              UNSPEC_VSEL4SF))]
+-  "TARGET_ALTIVEC"
+-  "vsel %0,%1,%2,%3"
+-  [(set_attr "type" "vecperm")])
+-
+-(define_insn "altivec_vsel_v8hi"
+-  [(set (match_operand:V8HI 0 "register_operand" "=v")
+-        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+-                      (match_operand:V8HI 2 "register_operand" "v")
+-                      (match_operand:V8HI 3 "register_operand" "v")] 
+-		     UNSPEC_VSEL8HI))]
+-  "TARGET_ALTIVEC"
+-  "vsel %0,%1,%2,%3"
+-  [(set_attr "type" "vecperm")])
+-
+-(define_insn "altivec_vsel_v16qi"
+-  [(set (match_operand:V16QI 0 "register_operand" "=v")
+-        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+-                       (match_operand:V16QI 2 "register_operand" "v")
+-                       (match_operand:V16QI 3 "register_operand" "v")] 
+-		      UNSPEC_VSEL16QI))]
+-  "TARGET_ALTIVEC"
+-  "vsel %0,%1,%2,%3"
+-  [(set_attr "type" "vecperm")])
+-
+ (define_insn "altivec_vsldoi_<mode>"
+   [(set (match_operand:V 0 "register_operand" "=v")
+         (unspec:V [(match_operand:V 1 "register_operand" "v")
+@@ -1878,6 +1570,14 @@ (define_expand "build_vector_mask_for_lo
+   gcc_assert (GET_CODE (operands[1]) == MEM);
+ 
+   addr = XEXP (operands[1], 0);
++  if (VECTOR_MEM_VSX_P (GET_MODE (operands[1])))
++    {
++      /* VSX doesn't AND off the bottom address bits, and memory
++	 operations are aligned to the natural data type.  */
++      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
++      DONE;
++    }
++
+   temp = gen_reg_rtx (GET_MODE (addr));
+   emit_insn (gen_rtx_SET (VOIDmode, temp, 
+ 			  gen_rtx_NEG (GET_MODE (addr), addr)));
+@@ -1959,95 +1659,6 @@ (define_insn "*altivec_stvesfx"
+   "stvewx %1,%y0"
+   [(set_attr "type" "vecstore")])
+ 
+-(define_expand "vec_init<mode>"
+-  [(match_operand:V 0 "register_operand" "")
+-   (match_operand 1 "" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_init (operands[0], operands[1]);
+-  DONE;
+-})
+-
+-(define_expand "vec_setv4si"
+-  [(match_operand:V4SI 0 "register_operand" "")
+-   (match_operand:SI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_setv8hi"
+-  [(match_operand:V8HI 0 "register_operand" "")
+-   (match_operand:HI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_setv16qi"
+-  [(match_operand:V16QI 0 "register_operand" "")
+-   (match_operand:QI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_setv4sf"
+-  [(match_operand:V4SF 0 "register_operand" "")
+-   (match_operand:SF 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_extractv4si"
+-  [(match_operand:SI 0 "register_operand" "")
+-   (match_operand:V4SI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_extractv8hi"
+-  [(match_operand:HI 0 "register_operand" "")
+-   (match_operand:V8HI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_extractv16qi"
+-  [(match_operand:QI 0 "register_operand" "")
+-   (match_operand:V16QI 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+-(define_expand "vec_extractv4sf"
+-  [(match_operand:SF 0 "register_operand" "")
+-   (match_operand:V4SF 1 "register_operand" "")
+-   (match_operand 2 "const_int_operand" "")]
+-  "TARGET_ALTIVEC"
+-{
+-  rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+-  DONE;
+-})
+-
+ ;; Generate
+ ;;    vspltis? SCRATCH0,0
+ ;;    vsubu?m SCRATCH2,SCRATCH1,%1
+@@ -2069,7 +1680,7 @@ (define_expand "abs<mode>2"
+ ;;    vspltisw SCRATCH1,-1
+ ;;    vslw SCRATCH2,SCRATCH1,SCRATCH1
+ ;;    vandc %0,%1,SCRATCH2
+-(define_expand "absv4sf2"
++(define_expand "altivec_absv4sf2"
+   [(set (match_dup 2)
+ 	(vec_duplicate:V4SI (const_int -1)))
+    (set (match_dup 3)
+@@ -2132,7 +1743,7 @@ (define_expand "vec_shl_<mode>"
+   DONE;
+ }")
+ 
+-;; Vector shift left in bits. Currently supported ony for shift
++;; Vector shift right in bits. Currently supported only for shift
+ ;; amounts that can be expressed as byte shifts (divisible by 8).
+ ;; General shift amounts can be supported using vsro + vsr. We're
+ ;; not expecting to see these yet (the vectorizer currently
+@@ -2665,7 +2276,7 @@ (define_expand "vec_pack_trunc_v4si"
+   DONE;
+ }")
+ 
+-(define_expand "negv4sf2"
++(define_expand "altivec_negv4sf2"
+   [(use (match_operand:V4SF 0 "register_operand" ""))
+    (use (match_operand:V4SF 1 "register_operand" ""))]
+   "TARGET_ALTIVEC"
+@@ -2994,29 +2605,6 @@ (define_expand "vec_extract_oddv16qi"
+   emit_insn (gen_vpkuhum_nomode (operands[0], operands[1], operands[2]));
+   DONE;
+ }")
+-(define_expand "vec_interleave_highv4sf"
+- [(set (match_operand:V4SF 0 "register_operand" "")
+-        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "")
+-                      (match_operand:V4SF 2 "register_operand" "")]
+-                      UNSPEC_INTERHI_V4SF))]
+-  "TARGET_ALTIVEC"
+-  "
+-{ 
+-  emit_insn (gen_altivec_vmrghsf (operands[0], operands[1], operands[2]));
+-  DONE;
+-}")
+-
+-(define_expand "vec_interleave_lowv4sf"
+- [(set (match_operand:V4SF 0 "register_operand" "")
+-        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "")
+-                      (match_operand:V4SF 2 "register_operand" "")]
+-                      UNSPEC_INTERLO_V4SF))]
+-  "TARGET_ALTIVEC"
+-  "
+-{
+-  emit_insn (gen_altivec_vmrglsf (operands[0], operands[1], operands[2]));
+-  DONE;
+-}")
+ 
+ (define_expand "vec_interleave_high<mode>"
+  [(set (match_operand:VI 0 "register_operand" "")
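+The altivec.md rework above replaces opaque UNSPECs with canonical RTL
+(eq/gt/gtu for the compares, if_then_else for vsel), which the RTL optimizers
+can actually reason about.  The vsel operation itself is a plain per-bit
+select; a small illustrative C model, not GCC code:
+
+  #include <stdint.h>
+  #include <assert.h>
+
+  /* Each result bit comes from T where the MASK bit is 1, and from F
+     where it is 0 -- the semantics the new altivec_vsel pattern writes
+     out as an if_then_else over the mask.  */
+  static uint32_t bit_select (uint32_t mask, uint32_t t, uint32_t f)
+  {
+    return (t & mask) | (f & ~mask);
+  }
+
+  int main (void)
+  {
+    assert (bit_select (0xFF00FF00u, 0x11111111u, 0x22222222u) == 0x11221122u);
+    return 0;
+  }
+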
+--- gcc/config/rs6000/aix61.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/aix61.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -57,20 +57,24 @@ do {									\
+ #undef ASM_SPEC
+ #define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+ 
+-/* Common ASM definitions used by ASM_SPEC amongst the various targets
+-   for handling -mcpu=xxx switches.  */
++/* Common ASM definitions used by ASM_SPEC amongst the various targets for
++   handling -mcpu=xxx switches.  There is a parallel list in driver-rs6000.c to
++   provide the default assembler options if the user uses -mcpu=native, so if
++   you make changes here, make them there also.  */
+ #undef ASM_CPU_SPEC
+ #define ASM_CPU_SPEC \
+ "%{!mcpu*: %{!maix64: \
+   %{mpowerpc64: -mppc64} \
+   %{maltivec: -m970} \
+   %{!maltivec: %{!mpower64: %(asm_default)}}}} \
++%{mcpu=native: %(asm_cpu_native)} \
+ %{mcpu=power3: -m620} \
+ %{mcpu=power4: -mpwr4} \
+ %{mcpu=power5: -mpwr5} \
+ %{mcpu=power5+: -mpwr5x} \
+ %{mcpu=power6: -mpwr6} \
+ %{mcpu=power6x: -mpwr6} \
++%{mcpu=power7: -mpwr7} \
+ %{mcpu=powerpc: -mppc} \
+ %{mcpu=rs64a: -mppc} \
+ %{mcpu=603: -m603} \
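+The ASM_CPU_SPEC additions above map each -mcpu= value to the matching AIX
+assembler flag (power7 gets -mpwr7, and -mcpu=native defers to a probed
+default from driver-rs6000.c).  The spec string behaves like a string-keyed
+lookup; a hypothetical C equivalent, for illustration only:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct cpu_map { const char *cpu; const char *asm_flag; };
+
+  static const struct cpu_map map[] = {
+    { "power6",  "-mpwr6" },
+    { "power6x", "-mpwr6" },
+    { "power7",  "-mpwr7" },   /* the entry this hunk adds */
+  };
+
+  static const char *asm_flag_for (const char *cpu)
+  {
+    for (size_t i = 0; i < sizeof map / sizeof map[0]; i++)
+      if (strcmp (map[i].cpu, cpu) == 0)
+        return map[i].asm_flag;
+    return NULL;   /* unknown cpu: no extra assembler flag */
+  }
+
+  int main (void)
+  {
+    printf ("%s\n", asm_flag_for ("power7"));   /* prints -mpwr7 */
+    return 0;
+  }
+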
+--- gcc/config/rs6000/rs6000.md	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/rs6000.md	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -138,7 +138,7 @@ (define_attr "length" ""
+ ;; Processor type -- this attribute must exactly match the processor_type
+ ;; enumeration in rs6000.h.
+ 
+-(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,power4,power5,power6,cell"
++(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,power4,power5,power6,power7,cell"
+   (const (symbol_ref "rs6000_cpu_attr")))
+ 
+ 
+@@ -167,6 +167,7 @@ (define_attr "cell_micro" "not,condition
+ (include "power4.md")
+ (include "power5.md")
+ (include "power6.md")
++(include "power7.md")
+ (include "cell.md")
+ (include "xfpu.md")
+ 
+@@ -218,6 +219,9 @@ (define_mode_attr wd [(QI "b") (HI "h") 
+ ; DImode bits
+ (define_mode_attr dbits [(QI "56") (HI "48") (SI "32")])
+ 
++;; ISEL/ISEL64 target selection
++(define_mode_attr sel [(SI "") (DI "64")])
++
+ 
+ ;; Start with fixed-point load and store insns.  Here we put only the more
+ ;; complex forms.  Basic data transfer is done later.
+@@ -520,7 +524,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %2,%1,0xff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -546,7 +550,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %0,%1,0xff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -687,7 +691,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %2,%1,0xff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -713,7 +717,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %0,%1,0xff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -856,7 +860,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %2,%1,0xffff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -882,7 +886,7 @@ (define_insn ""
+   "@
+    {andil.|andi.} %0,%1,0xffff
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -1670,7 +1674,7 @@ (define_insn ""
+   "@
+    nor. %2,%1,%1
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -1696,7 +1700,7 @@ (define_insn ""
+   "@
+    nor. %0,%1,%1
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -2221,10 +2225,22 @@ (define_insn "popcntb<mode>2"
+   "TARGET_POPCNTB"
+   "popcntb %0,%1")
+ 
++(define_insn "popcntwsi2"
++  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
++	(popcount:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
++  "TARGET_POPCNTD"
++  "popcntw %0,%1")
++
++(define_insn "popcntddi2"
++  [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
++	(popcount:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
++  "TARGET_POPCNTD && TARGET_POWERPC64"
++  "popcntd %0,%1")
++
+ (define_expand "popcount<mode>2"
+   [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ 	(popcount:GPR (match_operand:GPR 1 "gpc_reg_operand" "")))]
+-  "TARGET_POPCNTB"
++  "TARGET_POPCNTB || TARGET_POPCNTD"
+   {
+     rs6000_emit_popcount (operands[0], operands[1]);
+     DONE;
+@@ -2852,7 +2868,7 @@ (define_insn "andsi3_mc"
+    {rlinm|rlwinm} %0,%1,0,%m2,%M2
+    {andil.|andi.} %0,%1,%b2
+    {andiu.|andis.} %0,%1,%u2"
+-  [(set_attr "type" "*,*,compare,compare")])
++  [(set_attr "type" "*,*,fast_compare,fast_compare")])
+ 
+ (define_insn "andsi3_nomc"
+   [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+@@ -2895,7 +2911,8 @@ (define_insn "*andsi3_internal2_mc"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
++  [(set_attr "type" "fast_compare,fast_compare,fast_compare,delayed_compare,\
++		     compare,compare,compare,compare")
+    (set_attr "length" "4,4,4,4,8,8,8,8")])
+ 
+ (define_insn "*andsi3_internal3_mc"
+@@ -2915,7 +2932,8 @@ (define_insn "*andsi3_internal3_mc"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
++  [(set_attr "type" "compare,fast_compare,fast_compare,delayed_compare,compare,\
++		     compare,compare,compare")
+    (set_attr "length" "8,4,4,4,8,8,8,8")])
+ 
+ (define_split
+@@ -2974,7 +2992,8 @@ (define_insn "*andsi3_internal4"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
++  [(set_attr "type" "fast_compare,fast_compare,fast_compare,delayed_compare,\
++		     compare,compare,compare,compare")
+    (set_attr "length" "4,4,4,4,8,8,8,8")])
+ 
+ (define_insn "*andsi3_internal5_mc"
+@@ -2996,7 +3015,8 @@ (define_insn "*andsi3_internal5_mc"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
++  [(set_attr "type" "compare,fast_compare,fast_compare,delayed_compare,compare,\
++		    compare,compare,compare")
+    (set_attr "length" "8,4,4,4,8,8,8,8")])
+ 
+ (define_split
+@@ -3127,7 +3147,7 @@ (define_insn "*boolsi3_internal2"
+   "@
+    %q4. %3,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -3156,7 +3176,7 @@ (define_insn "*boolsi3_internal3"
+   "@
+    %q4. %0,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -3281,7 +3301,7 @@ (define_insn "*boolccsi3_internal2"
+   "@
+    %q4. %3,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -3310,7 +3330,7 @@ (define_insn "*boolccsi3_internal3"
+   "@
+    %q4. %0,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -5303,7 +5323,7 @@ (define_insn "fres"
+   "fres %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn ""
++(define_insn "*fmaddsf4_powerpc"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 			  (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5314,7 +5334,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fmaddsf4_power"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 			  (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5323,7 +5343,7 @@ (define_insn ""
+   "{fma|fmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")])
+ 
+-(define_insn ""
++(define_insn "*fmsubsf4_powerpc"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 			   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5334,7 +5354,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fmsubsf4_power"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 			   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5343,7 +5363,7 @@ (define_insn ""
+   "{fms|fmsub} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")])
+ 
+-(define_insn ""
++(define_insn "*fnmaddsf4_powerpc_1"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 				  (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5354,7 +5374,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fnmaddsf4_powerpc_2"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ 			   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5365,7 +5385,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fnmaddsf4_power_1"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 				  (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5374,7 +5394,7 @@ (define_insn ""
+   "{fnma|fnmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")])
+ 
+-(define_insn ""
++(define_insn "*fnmaddsf4_power_2"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ 			   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5384,7 +5404,7 @@ (define_insn ""
+   "{fnma|fnmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubsf4_powerpc_1"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 				   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5395,7 +5415,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubsf4_powerpc_2"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ 		  (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+@@ -5406,7 +5426,7 @@ (define_insn ""
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_maddsub_s")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubsf4_power_1"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ 				   (match_operand:SF 2 "gpc_reg_operand" "f"))
+@@ -5415,7 +5435,7 @@ (define_insn ""
+   "{fnms|fnmsub} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubsf4_power_2"
+   [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ 	(minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ 		  (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+@@ -5496,9 +5516,18 @@ (define_expand "copysigndf3"
+ 	                     (match_dup 5))
+ 			 (match_dup 3)
+ 			 (match_dup 4)))]
+-  "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+-   && !HONOR_NANS (DFmode) && !HONOR_SIGNED_ZEROS (DFmode)"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && ((TARGET_PPC_GFXOPT
++        && !HONOR_NANS (DFmode)
++        && !HONOR_SIGNED_ZEROS (DFmode))
++       || VECTOR_UNIT_VSX_P (DFmode))"
+   {
++     if (VECTOR_UNIT_VSX_P (DFmode))
++       {
++	 emit_insn (gen_vsx_copysigndf3 (operands[0], operands[1],
++					 operands[2]));
++	 DONE;
++       }
+      operands[3] = gen_reg_rtx (DFmode);
+      operands[4] = gen_reg_rtx (DFmode);
+      operands[5] = CONST0_RTX (DFmode);
+@@ -5542,12 +5571,12 @@ (define_split
+   DONE;
+ }")
+ 
+-(define_expand "movsicc"
+-   [(set (match_operand:SI 0 "gpc_reg_operand" "")
+-	 (if_then_else:SI (match_operand 1 "comparison_operator" "")
+-			  (match_operand:SI 2 "gpc_reg_operand" "")
+-			  (match_operand:SI 3 "gpc_reg_operand" "")))]
+-  "TARGET_ISEL"
++(define_expand "mov<mode>cc"
++   [(set (match_operand:GPR 0 "gpc_reg_operand" "")
++	 (if_then_else:GPR (match_operand 1 "comparison_operator" "")
++			   (match_operand:GPR 2 "gpc_reg_operand" "")
++			   (match_operand:GPR 3 "gpc_reg_operand" "")))]
++  "TARGET_ISEL<sel>"
+   "
+ {
+   if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+@@ -5564,28 +5593,28 @@ (define_expand "movsicc"
+ ;; leave out the mode in operand 4 and use one pattern, but reload can
+ ;; change the mode underneath our feet and then gets confused trying
+ ;; to reload the value.
+-(define_insn "isel_signed"
+-  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+-	(if_then_else:SI
++(define_insn "isel_signed_<mode>"
++  [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
++	(if_then_else:GPR
+ 	 (match_operator 1 "comparison_operator"
+ 			 [(match_operand:CC 4 "cc_reg_operand" "y")
+ 			  (const_int 0)])
+-	 (match_operand:SI 2 "gpc_reg_operand" "b")
+-	 (match_operand:SI 3 "gpc_reg_operand" "b")))]
+-  "TARGET_ISEL"
++	 (match_operand:GPR 2 "gpc_reg_operand" "b")
++	 (match_operand:GPR 3 "gpc_reg_operand" "b")))]
++  "TARGET_ISEL<sel>"
+   "*
+ { return output_isel (operands); }"
+   [(set_attr "length" "4")])
+ 
+-(define_insn "isel_unsigned"
+-  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+-	(if_then_else:SI
++(define_insn "isel_unsigned_<mode>"
++  [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
++	(if_then_else:GPR
+ 	 (match_operator 1 "comparison_operator"
+ 			 [(match_operand:CCUNS 4 "cc_reg_operand" "y")
+ 			  (const_int 0)])
+-	 (match_operand:SI 2 "gpc_reg_operand" "b")
+-	 (match_operand:SI 3 "gpc_reg_operand" "b")))]
+-  "TARGET_ISEL"
++	 (match_operand:GPR 2 "gpc_reg_operand" "b")
++	 (match_operand:GPR 3 "gpc_reg_operand" "b")))]
++  "TARGET_ISEL<sel>"
+   "*
+ { return output_isel (operands); }"
+   [(set_attr "length" "4")])
+@@ -5633,7 +5662,8 @@ (define_expand "negdf2"
+ (define_insn "*negdf2_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(neg:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fneg %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -5646,14 +5676,16 @@ (define_expand "absdf2"
+ (define_insn "*absdf2_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(abs:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fabs %0,%1"
+   [(set_attr "type" "fp")])
+ 
+ (define_insn "*nabsdf2_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f"))))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fnabs %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -5668,7 +5700,8 @@ (define_insn "*adddf3_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(plus:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 		 (match_operand:DF 2 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "{fa|fadd} %0,%1,%2"
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_addsub_d")])
+@@ -5684,7 +5717,8 @@ (define_insn "*subdf3_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(minus:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ 		  (match_operand:DF 2 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "{fs|fsub} %0,%1,%2"
+   [(set_attr "type" "fp")
+    (set_attr "fp_type" "fp_addsub_d")])
+@@ -5700,7 +5734,8 @@ (define_insn "*muldf3_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 		 (match_operand:DF 2 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "{fm|fmul} %0,%1,%2"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_mul_d")])
+@@ -5718,7 +5753,8 @@ (define_insn "*divdf3_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(div:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ 		(match_operand:DF 2 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && !TARGET_SIMPLE_FPU"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && !TARGET_SIMPLE_FPU
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "{fd|fdiv} %0,%1,%2"
+   [(set_attr "type" "ddiv")])
+ 
+@@ -5734,73 +5770,81 @@ (define_expand "recipdf3"
+    DONE;
+ })
+ 
+-(define_insn "fred"
++(define_expand "fred"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
++	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
++  "(TARGET_POPCNTB || VECTOR_UNIT_VSX_P (DFmode)) && flag_finite_math_only"
++  "")
++
++(define_insn "*fred_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+-  "TARGET_POPCNTB && flag_finite_math_only"
++  "TARGET_POPCNTB && flag_finite_math_only && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fre %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn ""
++(define_insn "*fmadddf4_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 			  (match_operand:DF 2 "gpc_reg_operand" "f"))
+ 		 (match_operand:DF 3 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
++   && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fma|fmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+ 
+-(define_insn ""
++(define_insn "*fmsubdf4_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 			   (match_operand:DF 2 "gpc_reg_operand" "f"))
+ 		  (match_operand:DF 3 "gpc_reg_operand" "f")))]
+-  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT"
++  "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
++   && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fms|fmsub} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+ 
+-(define_insn ""
++(define_insn "*fnmadddf4_fpr_1"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(neg:DF (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 				  (match_operand:DF 2 "gpc_reg_operand" "f"))
+ 			 (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+   "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+-   && HONOR_SIGNED_ZEROS (DFmode)"
++   && HONOR_SIGNED_ZEROS (DFmode) && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fnma|fnmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+ 
+-(define_insn ""
++(define_insn "*fnmadddf4_fpr_2"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(minus:DF (mult:DF (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f"))
+ 			   (match_operand:DF 2 "gpc_reg_operand" "f"))
+ 		  (match_operand:DF 3 "gpc_reg_operand" "f")))]
+   "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+-   && ! HONOR_SIGNED_ZEROS (DFmode)"
++   && ! HONOR_SIGNED_ZEROS (DFmode) && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fnma|fnmadd} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubdf4_fpr_1"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(neg:DF (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 				   (match_operand:DF 2 "gpc_reg_operand" "f"))
+ 			  (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+   "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+-   && HONOR_SIGNED_ZEROS (DFmode)"
++   && HONOR_SIGNED_ZEROS (DFmode) && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fnms|fnmsub} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+ 
+-(define_insn ""
++(define_insn "*fnmsubdf4_fpr_2"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
+ 	          (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ 			   (match_operand:DF 2 "gpc_reg_operand" "f"))))]
+   "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+-   && ! HONOR_SIGNED_ZEROS (DFmode)"
++   && ! HONOR_SIGNED_ZEROS (DFmode) && VECTOR_UNIT_NONE_P (DFmode)"
+   "{fnms|fnmsub} %0,%1,%2,%3"
+   [(set_attr "type" "dmul")
+    (set_attr "fp_type" "fp_maddsub_d")])
+@@ -5809,7 +5853,8 @@ (define_insn "sqrtdf2"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(sqrt:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+   "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT && TARGET_FPRS 
+-   && TARGET_DOUBLE_FLOAT"
++   && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fsqrt %0,%1"
+   [(set_attr "type" "dsqrt")])
+ 
+@@ -5898,6 +5943,18 @@ (define_expand "fix_truncsfsi2"
+  "TARGET_HARD_FLOAT && !TARGET_FPRS && TARGET_SINGLE_FLOAT"
+  "")
+ 
++(define_expand "fixuns_truncdfsi2"
++  [(set (match_operand:SI 0 "gpc_reg_operand" "")
++	(unsigned_fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))]
++  "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
++  "")
++
++(define_expand "fixuns_truncdfdi2"
++  [(set (match_operand:DI 0 "register_operand" "")
++	(unsigned_fix:DI (match_operand:DF 1 "register_operand" "")))]
++  "TARGET_HARD_FLOAT && TARGET_VSX"
++  "")
++
+ ; For each of these conversions, there is a define_expand, a define_insn
+ ; with a '#' template, and a define_split (with C code).  The idea is
+ ; to allow constant folding with the template of the define_insn,
+@@ -6139,10 +6196,17 @@ (define_insn "fctiwz"
+   "{fcirz|fctiwz} %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn "btruncdf2"
++(define_expand "btruncdf2"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+   "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "")
++
++(define_insn "*btruncdf2_fprs"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
++	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
++  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "friz %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -6153,10 +6217,17 @@ (define_insn "btruncsf2"
+   "friz %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn "ceildf2"
++(define_expand "ceildf2"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "")
++	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "")] UNSPEC_FRIP))]
++  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "")
++
++(define_insn "*ceildf2_fprs"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+-  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "frip %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -6167,10 +6238,17 @@ (define_insn "ceilsf2"
+   "frip %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn "floordf2"
++(define_expand "floordf2"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "")
++	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "")] UNSPEC_FRIM))]
++  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "")
++
++(define_insn "*floordf2_fprs"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+-  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
++  "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "frim %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -6181,6 +6259,7 @@ (define_insn "floorsf2"
+   "frim %0,%1"
+   [(set_attr "type" "fp")])
+ 
++;; No VSX equivalent to frin
+ (define_insn "rounddf2"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+@@ -6195,6 +6274,12 @@ (define_insn "roundsf2"
+   "frin %0,%1"
+   [(set_attr "type" "fp")])
+ 
++(define_expand "ftruncdf2"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "")
++  	(fix:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
++  "VECTOR_UNIT_VSX_P (DFmode)"
++  "")
++
+ ; An UNSPEC is used so we don't have to support SImode in FP registers.
+ (define_insn "stfiwx"
+   [(set (match_operand:SI 0 "memory_operand" "=Z")
+@@ -6210,17 +6295,40 @@ (define_expand "floatsisf2"
+   "TARGET_HARD_FLOAT && !TARGET_FPRS"
+   "")
+ 
+-(define_insn "floatdidf2"
++(define_expand "floatdidf2"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "")
++	(float:DF (match_operand:DI 1 "gpc_reg_operand" "")))]
++  "(TARGET_POWERPC64 || TARGET_XILINX_FPU || VECTOR_UNIT_VSX_P (DFmode))
++   && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
++  "")
++
++(define_insn "*floatdidf2_fpr"
+   [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ 	(float:DF (match_operand:DI 1 "gpc_reg_operand" "!f#r")))]
+-  "(TARGET_POWERPC64 || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
++  "(TARGET_POWERPC64 || TARGET_XILINX_FPU)
++   && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS
++   && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fcfid %0,%1"
+   [(set_attr "type" "fp")])
+ 
+-(define_insn "fix_truncdfdi2"
++(define_expand "floatunsdidf2"
++  [(set (match_operand:DF 0 "gpc_reg_operand" "")
++	(unsigned_float:DF (match_operand:DI 1 "gpc_reg_operand" "")))]
++  "TARGET_VSX"
++  "")
++
++(define_expand "fix_truncdfdi2"
++  [(set (match_operand:DI 0 "gpc_reg_operand" "")
++	(fix:DI (match_operand:DF 1 "gpc_reg_operand" "")))]
++  "(TARGET_POWERPC64 || TARGET_XILINX_FPU || VECTOR_UNIT_VSX_P (DFmode))
++    && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
++  "")
++
++(define_insn "*fix_truncdfdi2_fpr"
+   [(set (match_operand:DI 0 "gpc_reg_operand" "=!f#r")
+ 	(fix:DI (match_operand:DF 1 "gpc_reg_operand" "f")))]
+-  "(TARGET_POWERPC64 || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
++  "(TARGET_POWERPC64 || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT
++    && TARGET_DOUBLE_FLOAT && TARGET_FPRS && !VECTOR_UNIT_VSX_P (DFmode)"
+   "fctidz %0,%1"
+   [(set_attr "type" "fp")])
+ 
+@@ -7609,7 +7717,7 @@ (define_insn "anddi3_mc"
+    andi. %0,%1,%b2
+    andis. %0,%1,%u2
+    #"
+-  [(set_attr "type" "*,*,*,compare,compare,*")
++  [(set_attr "type" "*,*,*,fast_compare,fast_compare,*")
+    (set_attr "length" "4,4,4,4,4,8")])
+ 
+ (define_insn "anddi3_nomc"
+@@ -7667,7 +7775,9 @@ (define_insn "*anddi3_internal2_mc"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
++  [(set_attr "type" "fast_compare,compare,delayed_compare,fast_compare,\
++		     fast_compare,compare,compare,compare,compare,compare,\
++		     compare,compare")
+    (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+ 
+ (define_split
+@@ -7718,7 +7828,9 @@ (define_insn "*anddi3_internal3_mc"
+    #
+    #
+    #"
+-  [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
++  [(set_attr "type" "fast_compare,compare,delayed_compare,fast_compare,\
++		     fast_compare,compare,compare,compare,compare,compare,\
++		     compare,compare")
+    (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+ 
+ (define_split
+@@ -7858,7 +7970,7 @@ (define_insn "*booldi3_internal2"
+   "@
+    %q4. %3,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -7887,7 +7999,7 @@ (define_insn "*booldi3_internal3"
+   "@
+    %q4. %0,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -7958,7 +8070,7 @@ (define_insn "*boolcdi3_internal2"
+   "@
+    %q4. %3,%2,%1
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -7987,7 +8099,7 @@ (define_insn "*boolcdi3_internal3"
+   "@
+    %q4. %0,%2,%1
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -8024,7 +8136,7 @@ (define_insn "*boolccdi3_internal2"
+   "@
+    %q4. %3,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -8053,7 +8165,7 @@ (define_insn "*boolccdi3_internal3"
+   "@
+    %q4. %0,%1,%2
+    #"
+-  [(set_attr "type" "compare")
++  [(set_attr "type" "fast_compare,compare")
+    (set_attr "length" "4,8")])
+ 
+ (define_split
+@@ -8070,6 +8182,51 @@ (define_split
+ 	(compare:CC (match_dup 0)
+ 		    (const_int 0)))]
+   "")
++
++(define_expand "smindi3"
++  [(match_operand:DI 0 "gpc_reg_operand" "")
++   (match_operand:DI 1 "gpc_reg_operand" "")
++   (match_operand:DI 2 "gpc_reg_operand" "")]
++  "TARGET_ISEL64"
++  "
++{
++  rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]);
++  DONE;
++}")
++
++(define_expand "smaxdi3"
++  [(match_operand:DI 0 "gpc_reg_operand" "")
++   (match_operand:DI 1 "gpc_reg_operand" "")
++   (match_operand:DI 2 "gpc_reg_operand" "")]
++  "TARGET_ISEL64"
++  "
++{
++  rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]);
++  DONE;
++}")
++
++(define_expand "umindi3"
++  [(match_operand:DI 0 "gpc_reg_operand" "")
++   (match_operand:DI 1 "gpc_reg_operand" "")
++   (match_operand:DI 2 "gpc_reg_operand" "")]
++  "TARGET_ISEL64"
++  "
++{
++  rs6000_emit_minmax (operands[0], UMIN, operands[1], operands[2]);
++  DONE;
++}")
++
++(define_expand "umaxdi3"
++  [(match_operand:DI 0 "gpc_reg_operand" "")
++   (match_operand:DI 1 "gpc_reg_operand" "")
++   (match_operand:DI 2 "gpc_reg_operand" "")]
++  "TARGET_ISEL64"
++  "
++{
++  rs6000_emit_minmax (operands[0], UMAX, operands[1], operands[2]);
++  DONE;
++}")
++
+ 
+ ;; Now define ways of moving data around.
+ 
+@@ -8473,8 +8630,8 @@ (define_split
+ ;; The "??" is a kludge until we can figure out a more reasonable way
+ ;; of handling these non-offsettable values.
+ (define_insn "*movdf_hardfloat32"
+-  [(set (match_operand:DF 0 "nonimmediate_operand" "=!r,??r,m,f,f,m,!r,!r,!r")
+-	(match_operand:DF 1 "input_operand" "r,m,r,f,m,f,G,H,F"))]
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=!r, ??r, m, ws, ?wa, ws, ?wa,  Z, ?Z, f, f, m, wa, !r, !r, !r")
++	(match_operand:DF 1 "input_operand"          "r,   m, r, ws,  wa,  Z,   Z, ws, wa, f, m, f,  j,  G,  H,  F"))]
+   "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT 
+    && (gpc_reg_operand (operands[0], DFmode)
+        || gpc_reg_operand (operands[1], DFmode))"
+@@ -8553,19 +8710,30 @@ (define_insn "*movdf_hardfloat32"
+ 	  return \"\";
+ 	}
+     case 3:
+-      return \"fmr %0,%1\";
+     case 4:
+-      return \"lfd%U1%X1 %0,%1\";
++      return \"xscpsgndp %x0,%x1,%x1\";
+     case 5:
+-      return \"stfd%U0%X0 %1,%0\";
+     case 6:
++      return \"lxsd%U1x %x0,%y1\";
+     case 7:
+     case 8:
++      return \"stxsd%U0x %x1,%y0\";
++    case 9:
++      return \"fmr %0,%1\";
++    case 10:
++      return \"lfd%U1%X1 %0,%1\";
++    case 11:
++      return \"stfd%U0%X0 %1,%0\";
++    case 12:
++      return \"xxlxor %x0,%x0,%x0\";
++    case 13:
++    case 14:
++    case 15:
+       return \"#\";
+     }
+ }"
+-  [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*")
+-   (set_attr "length" "8,16,16,4,4,4,8,12,16")])
++  [(set_attr "type" "two, load, store, fp, fp, fpload, fpload, fpstore, fpstore, fp, fpload, fpstore, vecsimple, *,  *,  *")
++   (set_attr "length" "8,   16,    16,  4,  4,      4,      4,       4,       4,  4,      4,       4,         4, 8, 12, 16")])
+ 
+ (define_insn "*movdf_softfloat32"
+   [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+@@ -8613,19 +8781,26 @@ (define_insn "*movdf_softfloat32"
+ ; ld/std require word-aligned displacements -> 'Y' constraint.
+ ; List Y->r and r->Y before r->r for reload.
+ (define_insn "*movdf_hardfloat64_mfpgpr"
+-  [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r,r,f")
+-	(match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F,f,r"))]
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=Y, r, !r, ws, ?wa, ws, ?wa,  Z, ?Z, f, f, m, wa, *c*l, !r, *h, !r, !r, !r, r, f")
++	(match_operand:DF 1 "input_operand"         "r, Y,  r, ws, ?wa,  Z,   Z, ws, wa, f, m, f,  j,    r,  h,  0,  G,  H,  F, f, r"))]
+   "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS 
+-   && TARGET_DOUBLE_FLOAT 
++   && TARGET_DOUBLE_FLOAT
+    && (gpc_reg_operand (operands[0], DFmode)
+        || gpc_reg_operand (operands[1], DFmode))"
+   "@
+    std%U0%X0 %1,%0
+    ld%U1%X1 %0,%1
+    mr %0,%1
++   xscpsgndp %x0,%x1,%x1
++   xscpsgndp %x0,%x1,%x1
++   lxsd%U1x %x0,%y1
++   lxsd%U1x %x0,%y1
++   stxsd%U0x %x1,%y0
++   stxsd%U0x %x1,%y0
+    fmr %0,%1
+    lfd%U1%X1 %0,%1
+    stfd%U0%X0 %1,%0
++   xxlxor %x0,%x0,%x0
+    mt%0 %1
+    mf%1 %0
+    {cror 0,0,0|nop}
+@@ -8634,33 +8809,40 @@ (define_insn "*movdf_hardfloat64_mfpgpr"
+    #
+    mftgpr %0,%1
+    mffgpr %0,%1"
+-  [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*,mftgpr,mffgpr")
+-   (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16,4,4")])
++  [(set_attr "type" "store, load, *, fp, fp, fpload, fpload, fpstore, fpstore, fp, fpload, fpstore, vecsimple, mtjmpr, mfjmpr, *, *,  *,  *, mftgpr, mffgpr")
++   (set_attr "length"   "4,    4, 4,  4,  4,      4,      4,       4,       4,  4,      4,       4,         4,      4,      4, 4, 8, 12, 16,      4,      4")])
+ 
+ ; ld/std require word-aligned displacements -> 'Y' constraint.
+ ; List Y->r and r->Y before r->r for reload.
+ (define_insn "*movdf_hardfloat64"
+-  [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r")
+-	(match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F"))]
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=Y, r, !r, ws, ?wa, ws, ?wa,  Z, ?Z, f, f, m, wa, *c*l, !r, *h, !r, !r, !r")
++	(match_operand:DF 1 "input_operand"         "r, Y,  r, ws,  wa,  Z,   Z, ws, wa, f, m, f,  j,    r,  h,  0,  G,  H,  F"))]
+   "TARGET_POWERPC64 && !TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS 
+-   && TARGET_DOUBLE_FLOAT 
++   && TARGET_DOUBLE_FLOAT
+    && (gpc_reg_operand (operands[0], DFmode)
+        || gpc_reg_operand (operands[1], DFmode))"
+   "@
+    std%U0%X0 %1,%0
+    ld%U1%X1 %0,%1
+    mr %0,%1
++   xscpsgndp %x0,%x1,%x1
++   xscpsgndp %x0,%x1,%x1
++   lxsd%U1x %x0,%y1
++   lxsd%U1x %x0,%y1
++   stxsd%U0x %x1,%y0
++   stxsd%U0x %x1,%y0
+    fmr %0,%1
+    lfd%U1%X1 %0,%1
+    stfd%U0%X0 %1,%0
++   xxlxor %x0,%x0,%x0
+    mt%0 %1
+    mf%1 %0
+    {cror 0,0,0|nop}
+    #
+    #
+    #"
+-  [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*")
+-   (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")])
++  [(set_attr "type" "store, load, *, fp, fp, fpload, fpload, fpstore, fpstore, fp, fpload, fpstore, vecsimple, mtjmpr, mfjmpr,  *,  *,  *,  *")
++   (set_attr "length" "  4,    4, 4,  4,  4,      4,      4,       4,       4,  4,      4,       4,         4,      4,      4,  4,  8, 12, 16")])
+ 
+ (define_insn "*movdf_softfloat64"
+   [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Y,r,cl,r,r,r,r,*h")
+@@ -9237,15 +9419,16 @@ (define_insn "*movti_string"
+ (define_insn "*movti_ppc64"
+   [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o<>,r")
+ 	(match_operand:TI 1 "input_operand" "r,r,m"))]
+-  "TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
+-   || gpc_reg_operand (operands[1], TImode))"
++  "(TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
++    || gpc_reg_operand (operands[1], TImode)))
++   && VECTOR_MEM_NONE_P (TImode)"
+   "#"
+   [(set_attr "type" "*,load,store")])
+ 
+ (define_split
+   [(set (match_operand:TI 0 "gpc_reg_operand" "")
+ 	(match_operand:TI 1 "const_double_operand" ""))]
+-  "TARGET_POWERPC64"
++  "TARGET_POWERPC64 && VECTOR_MEM_NONE_P (TImode)"
+   [(set (match_dup 2) (match_dup 4))
+    (set (match_dup 3) (match_dup 5))]
+   "
+@@ -9271,7 +9454,7 @@ (define_split
+ (define_split
+   [(set (match_operand:TI 0 "nonimmediate_operand" "")
+         (match_operand:TI 1 "input_operand" ""))]
+-  "reload_completed
++  "reload_completed && VECTOR_MEM_NONE_P (TImode)
+    && gpr_or_gpr_p (operands[0], operands[1])"
+   [(pc)]
+ { rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+@@ -14891,6 +15074,8 @@ (define_insn "prefetch"
+ 
+ 
+ (include "sync.md")
++(include "vector.md")
++(include "vsx.md")
+ (include "altivec.md")
+ (include "spe.md")
+ (include "dfp.md")
+--- gcc/config/rs6000/e500.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/e500.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -37,6 +37,8 @@
+       {									\
+ 	if (TARGET_ALTIVEC)						\
+ 	  error ("AltiVec and E500 instructions cannot coexist");	\
++	if (TARGET_VSX)							\
++	  error ("VSX and E500 instructions cannot coexist");		\
+ 	if (TARGET_64BIT)						\
+ 	  error ("64-bit E500 not supported");				\
+ 	if (TARGET_HARD_FLOAT && TARGET_FPRS)				\
+--- gcc/config/rs6000/driver-rs6000.c	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/driver-rs6000.c	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -343,11 +343,115 @@ detect_processor_aix (void)
+ #endif /* _AIX */
+ 
+ 
++/*
++ * Array to map -mcpu=native names to the switches passed to the assembler.
++ * This list mirrors the specs in ASM_CPU_SPEC, and any changes made here
++ * should be made there as well.
++ */
++
++struct asm_name {
++  const char *cpu;
++  const char *asm_sw;
++};
++
++static const
++struct asm_name asm_names[] = {
++#if defined (_AIX)
++  { "power3",	"-m620" },
++  { "power4",	"-mpwr4" },
++  { "power5",	"-mpwr5" },
++  { "power5+",	"-mpwr5x" },
++  { "power6",	"-mpwr6" },
++  { "power6x",	"-mpwr6" },
++  { "power7",	"-mpwr7" },
++  { "powerpc",	"-mppc" },
++  { "rs64a",	"-mppc" },
++  { "603",	"-m603" },
++  { "603e",	"-m603" },
++  { "604",	"-m604" },
++  { "604e",	"-m604" },
++  { "620",	"-m620" },
++  { "630",	"-m620" },
++  { "970",	"-m970" },
++  { "G5",	"-m970" },
++  { NULL,	"\
++%{!maix64: \
++%{mpowerpc64: -mppc64} \
++%{maltivec: -m970} \
++%{!maltivec: %{!mpower64: %(asm_default)}}}" },
++
++#else
++  { "common",	"-mcom" },
++  { "cell",	"-mcell" },
++  { "power",	"-mpwr" },
++  { "power2",	"-mpwrx" },
++  { "power3",	"-mppc64" },
++  { "power4",	"-mpower4" },
++  { "power5",	"%(asm_cpu_power5)" },
++  { "power5+",	"%(asm_cpu_power5)" },
++  { "power6",	"%(asm_cpu_power6) -maltivec" },
++  { "power6x",	"%(asm_cpu_power6) -maltivec" },
++  { "power7",	"%(asm_cpu_power7)" },
++  { "powerpc",	"-mppc" },
++  { "rios",	"-mpwr" },
++  { "rios1",	"-mpwr" },
++  { "rios2",	"-mpwrx" },
++  { "rsc",	"-mpwr" },
++  { "rsc1",	"-mpwr" },
++  { "rs64a",	"-mppc64" },
++  { "401",	"-mppc" },
++  { "403",	"-m403" },
++  { "405",	"-m405" },
++  { "405fp",	"-m405" },
++  { "440",	"-m440" },
++  { "440fp",	"-m440" },
++  { "464",	"-m440" },
++  { "464fp",	"-m440" },
++  { "505",	"-mppc" },
++  { "601",	"-m601" },
++  { "602",	"-mppc" },
++  { "603",	"-mppc" },
++  { "603e",	"-mppc" },
++  { "ec603e",	"-mppc" },
++  { "604",	"-mppc" },
++  { "604e",	"-mppc" },
++  { "620",	"-mppc64" },
++  { "630",	"-mppc64" },
++  { "740",	"-mppc" },
++  { "750",	"-mppc" },
++  { "G3",	"-mppc" },
++  { "7400",	"-mppc -maltivec" },
++  { "7450",	"-mppc -maltivec" },
++  { "G4",	"-mppc -maltivec" },
++  { "801",	"-mppc" },
++  { "821",	"-mppc" },
++  { "823",	"-mppc" },
++  { "860",	"-mppc" },
++  { "970",	"-mpower4 -maltivec" },
++  { "G5",	"-mpower4 -maltivec" },
++  { "8540",	"-me500" },
++  { "8548",	"-me500" },
++  { "e300c2",	"-me300" },
++  { "e300c3",	"-me300" },
++  { "e500mc",	"-me500mc" },
++  { NULL,	"\
++%{mpower: %{!mpower2: -mpwr}} \
++%{mpower2: -mpwrx} \
++%{mpowerpc64*: -mppc64} \
++%{!mpowerpc64*: %{mpowerpc*: -mppc}} \
++%{mno-power: %{!mpowerpc*: -mcom}} \
++%{!mno-power: %{!mpower*: %(asm_default)}}" },
++#endif
++};
++
+ /* This will be called by the spec parser in gcc.c when it sees
+    a %:local_cpu_detect(args) construct.  Currently it will be called
+    with either "arch" or "tune" as argument depending on if -march=native
+    or -mtune=native is to be substituted.
+ 
++   Additionally it will be called with "asm" to select the appropriate flags
++   for the assembler.
++
+    It returns a string containing new command line parameters to be
+    put at the place of the above two options, depending on what CPU
+    this is executed.
+@@ -361,29 +465,35 @@ const char
+   const char *cache = "";
+   const char *options = "";
+   bool arch;
++  bool assembler;
++  size_t i;
+ 
+   if (argc < 1)
+     return NULL;
+ 
+   arch = strcmp (argv[0], "cpu") == 0;
+-  if (!arch && strcmp (argv[0], "tune"))
++  assembler = (!arch && strcmp (argv[0], "asm") == 0);
++  if (!arch && !assembler && strcmp (argv[0], "tune"))
+     return NULL;
+ 
++  if (! assembler)
++    {
+ #if defined (_AIX)
+-  cache = detect_caches_aix ();
++      cache = detect_caches_aix ();
+ #elif defined (__APPLE__)
+-  cache = detect_caches_darwin ();
++      cache = detect_caches_darwin ();
+ #elif defined (__FreeBSD__)
+-  cache = detect_caches_freebsd ();
+-  /* FreeBSD PPC does not provide any cache information yet.  */
+-  cache = "";
++      cache = detect_caches_freebsd ();
++      /* FreeBSD PPC does not provide any cache information yet.  */
++      cache = "";
+ #elif defined (__linux__)
+-  cache = detect_caches_linux ();
+-  /* PPC Linux does not provide any cache information yet.  */
+-  cache = "";
++      cache = detect_caches_linux ();
++      /* PPC Linux does not provide any cache information yet.  */
++      cache = "";
+ #else
+-  cache = "";
++      cache = "";
+ #endif
++    }
+ 
+ #if defined (_AIX)
+   cpu = detect_processor_aix ();
+@@ -397,6 +507,17 @@ const char
+   cpu = "powerpc";
+ #endif
+ 
++  if (assembler)
++    {
++      for (i = 0; i < sizeof (asm_names) / sizeof (asm_names[0]); i++)
++	{
++	  if (!asm_names[i].cpu || !strcmp (asm_names[i].cpu, cpu))
++	    return asm_names[i].asm_sw;
++	}
++
++      return NULL;
++    }
++
+   return concat (cache, "-m", argv[0], "=", cpu, " ", options, NULL);
+ }
+ 
+--- gcc/config/rs6000/sysv4.h	(.../trunk)	(revision 144557)
++++ gcc/config/rs6000/sysv4.h	(.../branches/ibm/power7-meissner)	(revision 144730)
+@@ -119,9 +119,9 @@ do {									\
+   else if (!strcmp (rs6000_abi_name, "i960-old"))			\
+     {									\
+       rs6000_current_abi = ABI_V4;					\
+-      target_flags |= (MASK_LITTLE_ENDIAN | MASK_EABI			\
+-		       | MASK_NO_BITFIELD_WORD);			\
++      target_flags |= (MASK_LITTLE_ENDIAN | MASK_EABI);			\
+       target_flags &= ~MASK_STRICT_ALIGN;				\
++      TARGET_NO_BITFIELD_WORD = 1;					\
+     }									\
+   else									\
+     {									\
+
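+
Taken together, the rs6000.md hunks in this patch also reroute several DFmode
operations (neg, abs, add, sub, mul, div, sqrt, copysign, the fri* rounding
insns) away from the classic FPR patterns when VSX is active.  A hedged
example for copysign, which now has a VSX expansion path via
vsx_copysigndf3 in addition to the older GFXOPT fsel sequence:

#include <cstdio>

/* __builtin_copysign maps to the copysigndf3 expander; with this
   patch and a VSX-enabled -mcpu it can expand through the VSX path
   instead of requiring TARGET_PPC_GFXOPT and -ffast-math-style
   flags.  Which sequence is emitted depends on the target options.  */
static double
flip (double mag, double sgn)
{
  return __builtin_copysign (mag, sgn);
}

int
main ()
{
  std::printf ("%g %g\n", flip (3.0, -1.0), flip (-2.5, 1.0));
  return 0;
}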
diff --git a/gcc44-pr37959.patch b/gcc44-pr37959.patch
new file mode 100644
index 0000000..00363b1
--- /dev/null
+++ b/gcc44-pr37959.patch
@@ -0,0 +1,146 @@
+2009-03-18  Dodji Seketeli  <dodji at redhat.com>
+	    Jakub Jelinek  <jakub at redhat.com>
+
+	PR debug/37959
+	* dwarf2out.c (dwarf_attr_name): Handle DW_AT_explicit attribute.
+	(gen_subprogram_die): When a function is explicit, generate the DW_AT_explicit
+	attribute.
+	* langhooks.h (struct lang_hooks_for_decls): Add function_decl_explicit_p
+	langhook.
+	* langhooks-def.h (LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P): Define.
+	(LANG_HOOKS_DECLS): Add LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P.
+
+	* cp-objcp-common.h (LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P): Define.
+	(cp_function_decl_explicit_p): New prototype.
+	* cp-objcp-common.c (cp_function_decl_explicit_p): New function.
+
+	* g++.dg/debug/dwarf2/explicit-constructor.C: New test.
+
+--- gcc/cp/cp-objcp-common.c.jj	2009-03-05 22:32:17.000000000 +0100
++++ gcc/cp/cp-objcp-common.c	2009-03-18 14:31:17.000000000 +0100
+@@ -1,5 +1,5 @@
+ /* Some code common to C++ and ObjC++ front ends.
+-   Copyright (C) 2004, 2007, 2008 Free Software Foundation, Inc.
++   Copyright (C) 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
+    Contributed by Ziemowit Laski  <zlaski at apple.com>
+ 
+ This file is part of GCC.
+@@ -203,6 +203,16 @@ cxx_staticp (tree arg)
+   return NULL_TREE;
+ }
+ 
+/* Return true if DECL is an explicit member function.  */
++
++bool
++cp_function_decl_explicit_p (tree decl)
++{
++  return (decl
++	  && FUNCTION_FIRST_USER_PARMTYPE (decl) != void_list_node
++	  && DECL_NONCONVERTING_P (decl));
++}
++
+ /* Stubs to keep c-opts.c happy.  */
+ void
+ push_file_scope (void)
+--- gcc/cp/cp-objcp-common.h.jj	2009-03-02 16:21:33.000000000 +0100
++++ gcc/cp/cp-objcp-common.h	2009-03-18 14:33:51.000000000 +0100
+@@ -1,5 +1,5 @@
+ /* Language hooks common to C++ and ObjC++ front ends.
+-   Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
++   Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+    Contributed by Ziemowit Laski  <zlaski at apple.com>
+ 
+ This file is part of GCC.
+@@ -26,6 +26,8 @@ along with GCC; see the file COPYING3.  
+ extern tree objcp_tsubst_copy_and_build (tree, tree, tsubst_flags_t,
+ 					 tree, bool);
+ 
++extern bool cp_function_decl_explicit_p (tree decl);
++
+ /* Lang hooks that are shared between C++ and ObjC++ are defined here.  Hooks
+    specific to C++ or ObjC++ go in cp/cp-lang.c and objcp/objcp-lang.c,
+    respectively.  */
+@@ -131,6 +133,8 @@ extern tree objcp_tsubst_copy_and_build 
+ #define LANG_HOOKS_TO_TARGET_CHARSET c_common_to_target_charset
+ #undef LANG_HOOKS_GIMPLIFY_EXPR
+ #define LANG_HOOKS_GIMPLIFY_EXPR cp_gimplify_expr
++#undef LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P
++#define LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P cp_function_decl_explicit_p
+ #undef LANG_HOOKS_OMP_PREDETERMINED_SHARING
+ #define LANG_HOOKS_OMP_PREDETERMINED_SHARING cxx_omp_predetermined_sharing
+ #undef LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR
+--- gcc/langhooks-def.h.jj	2009-03-18 14:24:43.000000000 +0100
++++ gcc/langhooks-def.h	2009-03-18 14:32:37.000000000 +0100
+@@ -190,6 +190,7 @@ extern tree lhd_make_node (enum tree_cod
+ #define LANG_HOOKS_GLOBAL_BINDINGS_P global_bindings_p
+ #define LANG_HOOKS_PUSHDECL	pushdecl
+ #define LANG_HOOKS_GETDECLS	getdecls
++#define LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P hook_bool_tree_false
+ #define LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL lhd_warn_unused_global_decl
+ #define LANG_HOOKS_WRITE_GLOBALS write_global_declarations
+ #define LANG_HOOKS_DECL_OK_FOR_SIBCALL	lhd_decl_ok_for_sibcall
+@@ -209,6 +210,7 @@ extern tree lhd_make_node (enum tree_cod
+   LANG_HOOKS_GLOBAL_BINDINGS_P, \
+   LANG_HOOKS_PUSHDECL, \
+   LANG_HOOKS_GETDECLS, \
++  LANG_HOOKS_FUNCTION_DECL_EXPLICIT_P, \
+   LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL, \
+   LANG_HOOKS_WRITE_GLOBALS, \
+   LANG_HOOKS_DECL_OK_FOR_SIBCALL, \
+--- gcc/langhooks.h.jj	2009-03-18 14:24:43.000000000 +0100
++++ gcc/langhooks.h	2009-03-18 14:32:06.000000000 +0100
+@@ -159,6 +159,9 @@ struct lang_hooks_for_decls
+   /* Returns the chain of decls so far in the current scope level.  */
+   tree (*getdecls) (void);
+ 
+  /* Returns true if DECL is an explicit member function.  */
++  bool (*function_decl_explicit_p) (tree);
++
+   /* Returns true when we should warn for an unused global DECL.
+      We will already have checked that it has static binding.  */
+   bool (*warn_unused_global) (const_tree);
+--- gcc/dwarf2out.c.jj	2009-03-18 14:24:43.000000000 +0100
++++ gcc/dwarf2out.c	2009-03-18 14:33:04.000000000 +0100
+@@ -5599,6 +5599,8 @@ dwarf_attr_name (unsigned int attr)
+       return "DW_AT_encoding";
+     case DW_AT_external:
+       return "DW_AT_external";
++    case DW_AT_explicit:
++      return "DW_AT_explicit";
+     case DW_AT_frame_base:
+       return "DW_AT_frame_base";
+     case DW_AT_friend:
+@@ -13620,6 +13622,11 @@ gen_subprogram_die (tree decl, dw_die_re
+ 	{
+ 	  add_AT_flag (subr_die, DW_AT_declaration, 1);
+ 
++	  /* If this is an explicit function declaration then generate
++	     a DW_AT_explicit attribute.  */
++          if (lang_hooks.decls.function_decl_explicit_p (decl))
++	    add_AT_flag (subr_die, DW_AT_explicit, 1);
++
+ 	  /* The first time we see a member function, it is in the context of
+ 	     the class to which it belongs.  We make sure of this by emitting
+ 	     the class first.  The next time is the definition, which is
+--- gcc/testsuite/g++.dg/debug/dwarf2/explicit-constructor.C.jj	2009-03-18 14:24:55.000000000 +0100
++++ gcc/testsuite/g++.dg/debug/dwarf2/explicit-constructor.C	2009-03-18 14:24:55.000000000 +0100
+@@ -0,0 +1,19 @@
++// Contributed by Dodji Seketeli <dodji at redhat.com>
++// Origin: PR c++
++// { dg-do compile }
++// { dg-options "-O -g -dA" }
++// { dg-final { scan-assembler-times "DW_AT_explicit" 2 } }
++
++struct Foo
++{
++  Foo () {}
++  explicit Foo (int) {}
++  Foo (char) {}
++  ~Foo () {};
++};
++
++void
++bar ()
++{
++  Foo foo;
++}
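+
In short, gcc44-pr37959.patch adds a function_decl_explicit_p langhook
(defaulting to hook_bool_tree_false, overridden for C++/ObjC++) so that
dwarf2out.c can flag explicit member functions with DW_AT_explicit.  A
slightly expanded variant of the bundled test, with the expected DWARF noted
(file name hypothetical; verify with readelf on the object file):

// g++ -g -c explicit-demo.cc && readelf --debug-dump=info explicit-demo.o
struct Widget
{
  Widget () {}               // no DW_AT_explicit
  explicit Widget (int) {}   // DW_AT_explicit expected on this DIE
  Widget (char) {}           // converting ctor: no DW_AT_explicit
  ~Widget () {}
};

void
use ()
{
  Widget a;
  Widget b (42);   // 'Widget b = 42;' would be rejected: ctor is explicit
  Widget c = 'x';  // copy-initialization via the converting ctor is fine
  (void) a; (void) b; (void) c;
}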
diff --git a/gcc44-pr38757.patch b/gcc44-pr38757.patch
new file mode 100644
index 0000000..c166411
--- /dev/null
+++ b/gcc44-pr38757.patch
@@ -0,0 +1,129 @@
+2009-03-18  Jakub Jelinek  <jakub at redhat.com>
+
+	PR debug/38757
+	* langhooks.h (struct lang_hooks): Add source_language langhook.
+	* langhooks-def.h (LANG_HOOKS_SOURCE_LANGUAGE): Define to NULL.
+	(LANG_HOOKS_INITIALIZER): Add LANG_HOOKS_SOURCE_LANGUAGE.
+	* c-lang.c (c_source_language): New function.
+	(LANG_HOOKS_SOURCE_LANGUAGE): Define.
+	* dwarf2out.c (add_prototyped_attribute): Add DW_AT_prototype
+	also for DW_LANG_{C,C99,ObjC}.
+	(gen_compile_unit_die): Use lang_hooks.source_language () to
+	determine if DW_LANG_C99 or DW_LANG_C89 should be returned.
+
+--- gcc/langhooks.h.jj	2009-03-02 09:45:47.000000000 +0100
++++ gcc/langhooks.h	2009-03-18 12:53:24.000000000 +0100
+@@ -1,5 +1,5 @@
+ /* The lang_hooks data structure.
+-   Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
++   Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+    Free Software Foundation, Inc.
+ 
+ This file is part of GCC.
+@@ -414,6 +414,10 @@ struct lang_hooks
+      if in the process TREE_CONSTANT or TREE_SIDE_EFFECTS need updating.  */
+   tree (*expr_to_decl) (tree expr, bool *tc, bool *se);
+ 
+  /* Return the year of the source language standard version if the FE
+     supports multiple versions of the standard.  */
++  int (*source_language) (void);
++
+   /* Whenever you add entries here, make sure you adjust langhooks-def.h
+      and langhooks.c accordingly.  */
+ };
+--- gcc/langhooks-def.h.jj	2009-03-02 09:45:47.000000000 +0100
++++ gcc/langhooks-def.h	2009-03-18 12:53:45.000000000 +0100
+@@ -1,5 +1,5 @@
+ /* Default macros to initialize the lang_hooks data structure.
+-   Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
++   Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+    Free Software Foundation, Inc.
+    Contributed by Alexandre Oliva  <aoliva at redhat.com>
+ 
+@@ -113,6 +113,7 @@ extern void lhd_omp_firstprivatize_type_
+ #define LANG_HOOKS_EXPR_TO_DECL		lhd_expr_to_decl
+ #define LANG_HOOKS_TO_TARGET_CHARSET	lhd_to_target_charset
+ #define LANG_HOOKS_INIT_TS		lhd_do_nothing
++#define LANG_HOOKS_SOURCE_LANGUAGE	NULL
+ 
+ /* Attribute hooks.  */
+ #define LANG_HOOKS_ATTRIBUTE_TABLE		NULL
+@@ -270,6 +271,7 @@ extern tree lhd_make_node (enum tree_cod
+   LANG_HOOKS_BUILTIN_FUNCTION_EXT_SCOPE, \
+   LANG_HOOKS_INIT_TS,          \
+   LANG_HOOKS_EXPR_TO_DECL, \
++  LANG_HOOKS_SOURCE_LANGUAGE, \
+ }
+ 
+ #endif /* GCC_LANG_HOOKS_DEF_H */
+--- gcc/c-lang.c.jj	2009-02-20 15:06:14.000000000 +0100
++++ gcc/c-lang.c	2009-03-18 13:33:41.000000000 +0100
+@@ -1,6 +1,6 @@
+ /* Language-specific hook definitions for C front end.
+    Copyright (C) 1991, 1995, 1997, 1998,
+-   1999, 2000, 2001, 2003, 2004, 2005, 2007, 2008
++   1999, 2000, 2001, 2003, 2004, 2005, 2007, 2008, 2009
+    Free Software Foundation, Inc.
+ 
+ This file is part of GCC.
+@@ -37,6 +37,12 @@ along with GCC; see the file COPYING3.  
+ 
+ enum c_language_kind c_language = clk_c;
+ 
++static int
++c_source_language (void)
++{
++  return flag_isoc99 ? 1999 : 1989;
++}
++
+ /* Lang hooks common to C and ObjC are declared in c-objc-common.h;
+    consequently, there should be very few hooks below.  */
+ 
+@@ -44,6 +50,8 @@ enum c_language_kind c_language = clk_c;
+ #define LANG_HOOKS_NAME "GNU C"
+ #undef LANG_HOOKS_INIT
+ #define LANG_HOOKS_INIT c_objc_common_init
++#undef LANG_HOOKS_SOURCE_LANGUAGE
++#define LANG_HOOKS_SOURCE_LANGUAGE c_source_language
+ 
+ /* Each front end provides its own lang hook initializer.  */
+ const struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
+--- gcc/dwarf2out.c.jj	2009-03-17 13:06:29.000000000 +0100
++++ gcc/dwarf2out.c	2009-03-18 12:55:36.000000000 +0100
+@@ -12470,9 +12470,18 @@ add_bit_size_attribute (dw_die_ref die, 
+ static inline void
+ add_prototyped_attribute (dw_die_ref die, tree func_type)
+ {
+-  if (get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C89
+-      && TYPE_ARG_TYPES (func_type) != NULL)
+-    add_AT_flag (die, DW_AT_prototyped, 1);
++  switch (get_AT_unsigned (comp_unit_die, DW_AT_language))
++    {
++    case DW_LANG_C:
++    case DW_LANG_C89:
++    case DW_LANG_C99:
++    case DW_LANG_ObjC:
++      if (TYPE_ARG_TYPES (func_type) != NULL)
++	add_AT_flag (die, DW_AT_prototyped, 1);
++      break;
++    default:
++      break;
++    }
+ }
+ 
+ /* Add an 'abstract_origin' attribute below a given DIE.  The DIE is found
+@@ -14419,7 +14428,13 @@ gen_compile_unit_die (const char *filena
+   else if (strcmp (language_string, "GNU Objective-C++") == 0)
+     language = DW_LANG_ObjC_plus_plus;
+   else
+-    language = DW_LANG_C89;
++    {
++      if (lang_hooks.source_language
++	  && lang_hooks.source_language () >= 1999)
++	language = DW_LANG_C99;
++      else
++	language = DW_LANG_C89;
++    }
+ 
+   add_AT_unsigned (die, DW_AT_language, language);
+   return die;
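+
gcc44-pr38757.patch adds a source_language langhook (NULL by default; the C
front end returns 1999 or 1989 depending on flag_isoc99) so that
gen_compile_unit_die can emit DW_LANG_C99, and it extends DW_AT_prototyped to
the C-family languages.  A hedged demo; the snippet is written to be valid
both as C99 and as C++, but the point only shows when it is built as C:

/* gcc -std=c99 -g -c c99-demo.c: the compile unit DIE should now carry
   DW_AT_language = DW_LANG_C99; with -std=c89 it stays DW_LANG_C89.
   Check with: readelf --debug-dump=info c99-demo.o  */
int
sum_squares (int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)  /* for-scope declaration: C99 (and C++) */
    s += i * i;
  return s;
}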
diff --git a/gcc44-pr39226.patch b/gcc44-pr39226.patch
new file mode 100644
index 0000000..9b99e6e
--- /dev/null
+++ b/gcc44-pr39226.patch
@@ -0,0 +1,108 @@
+2009-03-03  Jakub Jelinek  <jakub at redhat.com>
+
+	PR target/39226
+	* config/rs6000/rs6000.md (andsi3_internal5_nomc,
+	anddi3_internal2_nomc, anddi3_internal3_nomc): Removed.
+	(booldi3_internal3): Use boolean_or_operator instead of
+	boolean_operator.
+
+	* gcc.dg/pr39226.c: New test.
+
+--- gcc/config/rs6000/rs6000.md.jj	2009-03-02 18:09:02.000000000 +0100
++++ gcc/config/rs6000/rs6000.md	2009-03-03 10:02:37.771461086 +0100
+@@ -2999,20 +2999,6 @@
+   [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+    (set_attr "length" "8,4,4,4,8,8,8,8")])
+ 
+-(define_insn "*andsi3_internal5_nomc"
+-  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y,??y,??y,?y")
+-        (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+-                            (match_operand:SI 2 "and_operand" "r,r,K,L,T"))
+-                    (const_int 0)))
+-   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+-        (and:SI (match_dup 1)
+-                (match_dup 2)))
+-   (clobber (match_scratch:CC 4 "=X,X,x,x,X"))]
+-  "TARGET_64BIT && !rs6000_gen_cell_microcode"
+-  "#"
+-  [(set_attr "type" "compare")
+-   (set_attr "length" "8,8,8,8,8")])
+-
+ (define_split
+   [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ 	(compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+@@ -7684,18 +7670,6 @@
+   [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+    (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+ 
+-(define_insn "*anddi3_internal2_nomc"
+-  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y,?y,??y,??y,?y")
+-	(compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r")
+-			    (match_operand:DI 2 "and64_2_operand" "t,r,S,K,J,t"))
+-		    (const_int 0)))
+-   (clobber (match_scratch:DI 3 "=r,r,r,r,r,r"))
+-   (clobber (match_scratch:CC 4 "=X,X,X,x,x,X"))]
+-  "TARGET_64BIT && !rs6000_gen_cell_microcode"
+-  "#"
+-  [(set_attr "type" "delayed_compare,compare,compare,compare,compare,compare")
+-   (set_attr "length" "8,8,8,8,8,12")])
+-
+ (define_split
+   [(set (match_operand:CC 0 "cc_reg_operand" "")
+         (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+@@ -7747,18 +7721,6 @@
+   [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+    (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+ 
+-(define_insn "*anddi3_internal3_nomc"
+-  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y,?y,??y,??y,?y")
+-	(compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r")
+-			    (match_operand:DI 2 "and64_2_operand" "t,r,S,K,J,t"))
+-		    (const_int 0)))
+-   (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+-	(and:DI (match_dup 1) (match_dup 2)))
+-   (clobber (match_scratch:CC 4 "=X,X,X,x,x,X"))]
+-  "TARGET_64BIT  && !rs6000_gen_cell_microcode"
+-  "#"
+-  [(set_attr "type" "delayed_compare,compare,compare,compare,compare,compare")
+-   (set_attr "length" "8,8,8,8,8,12")])
+ (define_split
+   [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ 	(compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+@@ -7915,7 +7877,7 @@
+ 
+ (define_insn "*booldi3_internal3"
+   [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+-	(compare:CC (match_operator:DI 4 "boolean_operator"
++	(compare:CC (match_operator:DI 4 "boolean_or_operator"
+ 	 [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ 	  (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ 	 (const_int 0)))
+--- gcc/testsuite/gcc.dg/pr39226.c.jj	2009-03-02 23:27:03.398459808 +0100
++++ gcc/testsuite/gcc.dg/pr39226.c	2009-03-02 23:26:19.696462209 +0100
+@@ -0,0 +1,25 @@
++/* PR target/39226 */
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-options "-O2 -mtune=cell -mminimal-toc" { target { powerpc*-*-* && lp64 } } } */
++
++struct A
++{
++  char *a;
++  unsigned int b : 1;
++  unsigned int c : 31;
++};
++
++struct B
++{
++  struct A *d;
++};
++
++void
++foo (struct B *x, unsigned long y)
++{
++  if (x->d[y].c)
++    return;
++  if (x->d[y].b)
++    x->d[y].a = 0;
++}
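
A note on PR target/39226 for readers of the hunks above: in rs6000's
predicates, boolean_operator matches and/ior/xor while boolean_or_operator
matches only ior/xor, so the *booldi3_internal3 change stops that pattern
from also claiming AND-with-compare forms, which must instead go through
the dedicated and-with-record patterns (whose no-microcode *_nomc variants
are removed here). A minimal, purely illustrative C++ sketch of the shape
involved; the function name is invented, not from the patch:

    // Hypothetical example: an AND whose result is only compared against
    // zero.  On powerpc64 at -O2 -mtune=cell this would typically become
    // an and-with-record ("and.") sequence, the form the removed
    // no-microcode patterns and the surviving splitters cover.
    int and_then_test (unsigned long x)
    {
      return (x & 0x7fffffffUL) != 0;
    }
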
diff --git a/gcc44-raw-string.patch b/gcc44-raw-string.patch
index b1edad1..ecc5c5d 100644
--- a/gcc44-raw-string.patch
+++ b/gcc44-raw-string.patch
@@ -177,7 +177,7 @@
  
 --- libcpp/lex.c.jj	2008-09-05 12:59:49.000000000 +0200
 +++ libcpp/lex.c	2008-09-12 13:54:01.000000000 +0200
-@@ -609,10 +609,185 @@ create_literal (cpp_reader *pfile, cpp_t
+@@ -610,12 +610,186 @@ create_literal (cpp_reader *pfile, cpp_t
    token->val.str.text = dest;
  }
  
@@ -358,14 +358,17 @@
  /* Lexes a string, character constant, or angle-bracketed header file
     name.  The stored string contains the spelling, including opening
 -   quote and leading any leading 'L', 'u' or 'U'.  It returns the type
--   of the literal, or CPP_OTHER if it was not properly terminated.
+-   of the literal, or CPP_OTHER if it was not properly terminated, or
+-   CPP_LESS for an unterminated header name which must be relexed as
+-   normal tokens.
 +   quote and any leading 'L', 'u', 'U' or 'u8' and optional
 +   'R' modifier.  It returns the type of the literal, or CPP_OTHER
-+   if it was not properly terminated.
++   if it was not properly terminated, or CPP_LESS for an unterminated
++   header name which must be relexed as normal tokens.
  
     The spelling is NUL-terminated, but it is not guaranteed that this
     is the first NUL since embedded NULs are preserved.  */
-@@ -626,12 +801,24 @@ lex_string (cpp_reader *pfile, cpp_token
+@@ -629,12 +803,24 @@ lex_string (cpp_reader *pfile, cpp_token
  
    cur = base;
    terminator = *cur++;
@@ -393,7 +396,7 @@
    else if (terminator == '\'')
      type = (*base == 'L' ? CPP_WCHAR :
  	    *base == 'U' ? CPP_CHAR32 :
-@@ -1035,10 +1222,20 @@ _cpp_lex_direct (cpp_reader *pfile)
+@@ -1094,10 +1280,20 @@ _cpp_lex_direct (cpp_reader *pfile)
      case 'L':
      case 'u':
      case 'U':
@@ -415,7 +418,7 @@
  	    {
  	      lex_string (pfile, result, buffer->cur - 1);
  	      break;
-@@ -1054,7 +1251,7 @@ _cpp_lex_direct (cpp_reader *pfile)
+@@ -1113,7 +1309,7 @@ _cpp_lex_direct (cpp_reader *pfile)
      case 'y': case 'z':
      case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
      case 'G': case 'H': case 'I': case 'J': case 'K':
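
The comment hunk above summarizes what the updated lexer accepts: string
and character literals may carry an L, u, U or u8 prefix, optionally
combined with an R raw-string modifier, and an unterminated header name
now lexes as CPP_LESS (to be relexed as normal tokens) rather than
CPP_OTHER. A hedged C++ illustration of the prefix forms, written in the
final C++11 spelling; the 2008-era draft this patch follows may use a
different raw-string delimiter syntax:

    // Illustrative only; requires -std=c++0x and a libcpp with this support.
    const char     *a = u8"UTF-8 text";               // u8 prefix
    const char16_t *b = u"UTF-16 text";               // u prefix
    const char32_t *c = U"UTF-32 text";               // U prefix
    const wchar_t  *d = L"wide text";                 // L prefix
    const char     *e = R"(backslashes \n stay raw)"; // R modifier
    const char16_t *f = uR"(prefixes combine with R)";
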
diff --git a/mingw32-gcc.spec b/mingw32-gcc.spec
index 4f5d212..84d6248 100644
--- a/mingw32-gcc.spec
+++ b/mingw32-gcc.spec
@@ -1,11 +1,11 @@
 %define __os_install_post /usr/lib/rpm/brp-compress %{nil}
 
-%define DATE 20090216
-%define SVNREV 144214
+%define DATE 20090319
+%define SVNREV 144967
 
 Name:           mingw32-gcc
 Version:        4.4.0
-Release:        0.6%{?dist}
+Release:        0.7%{?dist}
 Summary:        MinGW Windows cross-compiler (GCC) for C
 
 License:        GPLv3+ and GPLv2+ with exceptions
@@ -17,7 +17,6 @@ Source0:        gcc-%{version}-%{DATE}.tar.bz2
 Source1:        libgcc_post_upgrade.c
 Source2:        README.libgcjwebplugin.so
 Source3:        protoize.1
-Source5:        ftp://gcc.gnu.org/pub/gcc/infrastructure/cloog-ppl-0.15.tar.gz
 
 # Patches from Fedora's native gcc.
 Patch0:         gcc44-hack.patch
@@ -38,8 +37,13 @@ Patch16:        gcc44-libgomp-omp_h-multilib.patch
 Patch20:        gcc44-libtool-no-rpath.patch
 Patch21:        gcc44-cloog-dl.patch
 Patch22:        gcc44-raw-string.patch
-Patch23:        gcc44-pr39175.patch
-Patch24:        gcc44-diff.patch
+Patch24:        gcc44-atom.patch
+Patch25:        gcc44-pr39226.patch
+Patch26:        gcc44-power7.patch
+Patch27:        gcc44-power7-2.patch
+Patch28:        gcc44-pr38757.patch
+Patch29:        gcc44-pr37959.patch
+Patch30:        gcc44-memmove-opt.patch
 
 BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
 
@@ -136,8 +140,13 @@ MinGW Windows cross-compiler for FORTRAN.
 %patch20 -p0 -b .libtool-no-rpath~
 %patch21 -p0 -b .cloog-dl~
 %patch22 -p0 -b .raw-string~
-%patch23 -p0 -b .pr39175~
-%patch24 -p0 -b .diff~
+%patch24 -p0 -b .atom~
+%patch25 -p0 -b .pr39226~
+%patch26 -p0 -b .power7~
+%patch27 -p0 -b .power7-2~
+%patch28 -p0 -b .pr38757~
+%patch29 -p0 -b .pr37959~
+%patch30 -p0 -b .memmove-opt~
 
 
 %build
@@ -170,7 +179,7 @@ CC="%{__cc} ${RPM_OPT_FLAGS}" \
   --enable-languages="$languages" \
   --with-bugurl=http://bugzilla.redhat.com/bugzilla
 
-make all
+make %{?_smp_mflags} all
 
 popd
 
@@ -287,6 +296,10 @@ rm -rf $RPM_BUILD_ROOT
 
 
 %changelog
+* Mon Mar 23 2009 Richard W.M. Jones <rjones at redhat.com> - 4.4.0-0.7
+- New native Fedora version gcc 4.4.0 20090319 svn 144967.
+- Enable _smp_mflags.
+
 * Wed Mar  4 2009 Richard W.M. Jones <rjones at redhat.com> - 4.4.0-0.6
 - Fix libobjc and consequently Objective C and Objective C++ compilers.
 
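The make change above is the "Enable _smp_mflags" item from the changelog:
%{?_smp_mflags} conditionally expands to a parallel-make flag (typically
-jN, with N derived from the build host's CPU count) when the macro is
defined, and to nothing otherwise, so hosts without the macro still build
serially. Running rpm --eval '%{?_smp_mflags}' on a build host shows the
actual expansion.
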
diff --git a/sources b/sources
index 2568471..155c3d1 100644
--- a/sources
+++ b/sources
@@ -1,3 +1 @@
-2659f09c2e43ef8b7d4406321753f1b2  fastjar-0.97.tar.gz
-0f5cb535b5450a8d61b14f690700a54c  gcc-4.4.0-20090216.tar.bz2
-716b7a0823f96c9d02c1703a9c47d387  cloog-ppl-0.15.tar.gz
+2992035eaf092d72eb98ad16b173f737  gcc-4.4.0-20090319.tar.bz2

