[gcc-6] 155/401: * Update to SVN 20160602 (r237043, 6.1.1) from the gcc-6-branch.
Ximin Luo
infinity0 at debian.org
Wed Apr 5 15:48:42 UTC 2017
This is an automated email from the git hooks/post-receive script.
infinity0 pushed a commit to branch pu/reproducible_builds
in repository gcc-6.
commit 57196843267d9a73d55294aaafb0b99f6a9bbc62
Author: doko <doko at 6ca36cf4-e1d1-0310-8c6f-e303bb2178ca>
Date: Thu Jun 2 21:08:55 2016 +0000
* Update to SVN 20160602 (r237043, 6.1.1) from the gcc-6-branch.
git-svn-id: svn://anonscm.debian.org/gcccvs/branches/sid/gcc-6@8868 6ca36cf4-e1d1-0310-8c6f-e303bb2178ca
---
debian/changelog | 16 +-
debian/patches/ada-kfreebsd.diff | 22 -
debian/patches/svn-updates.diff | 7041 +++++++++++++++++++++++++++++++++++++-
3 files changed, 7006 insertions(+), 73 deletions(-)
diff --git a/debian/changelog b/debian/changelog
index f1d6ffa..c130be1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,22 +1,28 @@
gcc-6 (6.1.1-5) UNRELEASED; urgency=medium
- * Update to SVN 20160528 (r236852, 6.1.1) from the gcc-6-branch.
+ * Update to SVN 20160602 (r237043, 6.1.1) from the gcc-6-branch.
- Fix PR libstdc++/70762, PR libstdc++/69703, PR libstdc++/69703,
PR libstdc++/71038, PR libstdc++/71036, PR libstdc++/71037,
PR libstdc++/71005, PR libstdc++/71004, PR libstdc++/70609, PR c/71171,
PR middle-end/71279, PR c++/71147, PR c++/71257,
PR tree-optimization/70884, PR c++/71210, PR tree-optimization/71031,
PR c++/69872, PR c++/71257, PR c++/70344, PR c++/71184, PR fortran/66461,
- PR fortran/71204, PR libffi/65567.
+ PR fortran/71204, PR libffi/65567, PR c++/71349, PR target/71201,
+ PR middle-end/71371, PR debug/71057, PR target/71056 (ARM32),
+ PR tree-optimization/69068, PR middle-end/71002, PR bootstrap/71071,
+ PR c++/71372, PR c++/70972, PR c++/71166, PR c++/71227, PR c++/60095,
+ PR c++/69515, PR c++/69009, PR c++/71173, PR c++/70522, PR c++/70584,
+ PR c++/70735, PR c++/71306, PR c++/71349, PR c++/71105, PR c++/71147,
+ PR ada/71358, PR ada/71317, PR fortran/71156.
* Fix cross building libgnatprj on i386 targeting 64bit archs (YunQiang Su).
Closes: #823126.
* Detect hard float for non-linux or non-glibc arm-*-*eabihf builds (Helmut
Grohne). Closes: #823894.
* Update embedded timestamp setting patch, backported from the trunk.
- * gccgo: Combine combine gccgo's ld() and ldShared() methods in cmd/go (Michael
- Hudson-Doyle). LP: #1586872.
+ * gccgo: Combine gccgo's ld() and ldShared() methods
+ in cmd/go (Michael Hudson-Doyle). LP: #1586872.
- -- Matthias Klose <doko at debian.org> Sat, 28 May 2016 19:02:31 +0200
+ -- Matthias Klose <doko at debian.org> Thu, 02 Jun 2016 23:07:05 +0200
gcc-6 (6.1.1-4) unstable; urgency=medium
diff --git a/debian/patches/ada-kfreebsd.diff b/debian/patches/ada-kfreebsd.diff
index 96ff23a..ead5c3a 100644
--- a/debian/patches/ada-kfreebsd.diff
+++ b/debian/patches/ada-kfreebsd.diff
@@ -222,28 +222,6 @@ Index: b/src/gcc/ada/s-osinte-kfreebsd-gnu.ads
function clock_gettime
(clock_id : clockid_t;
-@@ -216,6 +215,11 @@ package System.OS_Interface is
- return int;
- pragma Import (C, clock_gettime, "clock_gettime");
-
-+ function clock_getres
-+ (clock_id : clockid_t;
-+ res : access timespec) return int;
-+ pragma Import (C, clock_getres, "clock_getres");
-+
- function To_Duration (TS : timespec) return Duration;
- pragma Inline (To_Duration);
-
-@@ -330,8 +334,7 @@ package System.OS_Interface is
- -- returns the stack base of the specified thread. Only call this function
- -- when Stack_Base_Available is True.
-
-- function Get_Page_Size return size_t;
-- function Get_Page_Size return Address;
-+ function Get_Page_Size return int;
- pragma Import (C, Get_Page_Size, "getpagesize");
- -- Returns the size of a page
-
@@ -437,31 +440,25 @@ package System.OS_Interface is
PTHREAD_PRIO_PROTECT : constant := 2;
PTHREAD_PRIO_INHERIT : constant := 1;
diff --git a/debian/patches/svn-updates.diff b/debian/patches/svn-updates.diff
index 1697e17..8bd6fb0 100644
--- a/debian/patches/svn-updates.diff
+++ b/debian/patches/svn-updates.diff
@@ -1,10 +1,10 @@
-# DP: updates from the 6 branch upto 20160528 (r236852).
+# DP: updates from the 6 branch upto 20160602 (r237043).
last_update()
{
cat > ${dir}LAST_UPDATED <<EOF
-Sat May 28 18:51:34 CEST 2016
-Sat May 28 16:51:34 UTC 2016 (revision 236852)
+Thu Jun 2 19:49:56 CEST 2016
+Thu Jun 2 17:49:56 UTC 2016 (revision 237043)
EOF
}
@@ -2672,11 +2672,60 @@ Index: gcc/tree-chkp.c
bounds = chkp_make_addressed_object_bounds (TREE_OPERAND (ptr_src, 0), iter);
break;
+Index: gcc/graphite-isl-ast-to-gimple.c
+===================================================================
+--- a/src/gcc/graphite-isl-ast-to-gimple.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/graphite-isl-ast-to-gimple.c (.../branches/gcc-6-branch)
+@@ -1075,9 +1075,7 @@
+ static bool
+ bb_contains_loop_phi_nodes (basic_block bb)
+ {
+- gcc_assert (EDGE_COUNT (bb->preds) <= 2);
+-
+- if (bb->preds->length () == 1)
++ if (EDGE_COUNT (bb->preds) != 2)
+ return false;
+
+ unsigned depth = loop_depth (bb->loop_father);
+@@ -1792,7 +1790,6 @@
+ b1 = b2;
+ }
+
+- gcc_assert (b1);
+ return b1;
+ }
+
+@@ -2481,13 +2478,14 @@
+
+ gcc_assert (!bb_contains_loop_close_phi_nodes (bb));
+
++ /* TODO: Handle cond phi nodes with more than 2 predecessors. */
++ if (EDGE_COUNT (bb->preds) != 2)
++ return false;
++
+ if (dump_file)
+ fprintf (dump_file, "[codegen] copying cond phi nodes in bb_%d.\n",
+ new_bb->index);
+
+- /* Cond phi nodes should have exactly two arguments. */
+- gcc_assert (2 == EDGE_COUNT (bb->preds));
+-
+ for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
+ gsi_next (&psi))
+ {
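The graphite-isl-ast-to-gimple.c hunks above drop the assumption that every join block has at most two predecessors: cond phi nodes with more than two incoming edges are now skipped during code generation instead of tripping an assert (PR tree-optimization/69068, with the related assert removal for PR 69067 listed in the ChangeLog below). As a purely illustrative sketch (not the PR testcase), a loop body that merges three paths is enough to create such a block; whether Graphite actually transforms it depends on the flags used (e.g. -O2 -floop-nest-optimize):

/* Illustrative sketch only, not the testcase from PR 69068: the
   three-way conditional inside the loop produces a join block with
   three predecessors, merging `v' through a phi with three arguments,
   the shape the removed asserts used to reject.  */
int
sum_signs (const int *a, int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)
    {
      int v;
      if (a[i] > 0)
        v = 1;
      else if (a[i] < 0)
        v = -1;
      else
        v = 0;
      s += v;
    }
  return s;
}
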
Index: gcc/c-family/ChangeLog
===================================================================
--- a/src/gcc/c-family/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/c-family/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,10 @@
+@@ -1,3 +1,18 @@
++2016-05-30 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71349
++ * c-omp.c (c_omp_split_clauses): Put OMP_CLAUSE_DEPEND to
++ C_OMP_CLAUSE_SPLIT_TARGET. Put OMP_CLAUSE_NOWAIT to
++ C_OMP_CLAUSE_SPLIT_TARGET if combined with target construct,
++ instead of C_OMP_CLAUSE_SPLIT_FOR.
++
+2016-04-29 Cesar Philippidis <cesar at codesourcery.com>
+
+ PR middle-end/70626
@@ -2728,6 +2777,41 @@ Index: gcc/c-family/c-omp.c
OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
loop_clauses = clauses;
break;
+@@ -966,6 +983,7 @@
+ case OMP_CLAUSE_MAP:
+ case OMP_CLAUSE_IS_DEVICE_PTR:
+ case OMP_CLAUSE_DEFAULTMAP:
++ case OMP_CLAUSE_DEPEND:
+ s = C_OMP_CLAUSE_SPLIT_TARGET;
+ break;
+ case OMP_CLAUSE_NUM_TEAMS:
+@@ -981,7 +999,6 @@
+ s = C_OMP_CLAUSE_SPLIT_PARALLEL;
+ break;
+ case OMP_CLAUSE_ORDERED:
+- case OMP_CLAUSE_NOWAIT:
+ s = C_OMP_CLAUSE_SPLIT_FOR;
+ break;
+ case OMP_CLAUSE_SCHEDULE:
+@@ -1316,6 +1333,18 @@
+ else
+ s = C_OMP_CLAUSE_SPLIT_FOR;
+ break;
++ case OMP_CLAUSE_NOWAIT:
++ /* Nowait clause is allowed on target, for and sections, but
++ is not allowed on parallel for or parallel sections. Therefore,
++ put it on target construct if present, because that can only
++ be combined with parallel for{, simd} and not with for{, simd},
++ otherwise to the worksharing construct. */
++ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
++ != 0)
++ s = C_OMP_CLAUSE_SPLIT_TARGET;
++ else
++ s = C_OMP_CLAUSE_SPLIT_FOR;
++ break;
+ default:
+ gcc_unreachable ();
+ }
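The c_omp_split_clauses changes above route OMP_CLAUSE_DEPEND to the target part of a combined construct, and route OMP_CLAUSE_NOWAIT to the target part when one is present (otherwise to the worksharing construct), as the new comment explains. A hedged sketch of the source-level construct this enables, together with the c-parser.c change shown further below (illustrative only, not the PR c++/71349 testcase):

/* Illustrative only, not the PR c++/71349 testcase: on this combined
   construct the depend and nowait clauses are now split onto the
   enclosing target construct, while the worksharing parts keep their
   own clauses.  Compile with -fopenmp.  */
extern float buf[1024];

void
scale (float c)
{
#pragma omp target teams distribute parallel for nowait depend(out: buf) map(tofrom: buf)
  for (int i = 0; i < 1024; i++)
    buf[i] *= c;
}
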
Index: gcc/c-family/c-common.h
===================================================================
--- a/src/gcc/c-family/c-common.h (.../tags/gcc_6_1_0_release)
@@ -2820,7 +2904,13 @@ Index: gcc/c/ChangeLog
===================================================================
--- a/src/gcc/c/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/c/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,22 @@
+@@ -1,3 +1,28 @@
++2016-05-30 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71349
++ * c-parser.c (c_parser_omp_for): Don't disallow nowait clause
++ when combined with target construct.
++
+2016-05-19 David Malcolm <dmalcolm at redhat.com>
+
+ Backport from trunk r236488.
@@ -3188,6 +3278,17 @@ Index: gcc/c/c-parser.c
tree block = c_begin_omp_parallel ();
tree clauses;
c_parser_oacc_loop (loc, parser, p_name, mask, &clauses, if_p);
+@@ -15094,7 +15094,9 @@
+
+ strcat (p_name, " for");
+ mask |= OMP_FOR_CLAUSE_MASK;
+- if (cclauses)
++ /* parallel for{, simd} disallows nowait clause, but for
++ target {teams distribute ,}parallel for{, simd} it should be accepted. */
++ if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
+ mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
+ /* Composite distribute parallel for{, simd} disallows ordered clause. */
+ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
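In the c-parser.c hunk above, the nowait bit is only removed from the allowed clause mask when the for is not part of a target combined construct, so plain parallel for still rejects nowait while the target combined forms now parse it. A small illustrative contrast (assumed behaviour after this change, not a testsuite case):

/* Illustrative contrast, not a testsuite case: after the change above,
   nowait is parsed on the target combined form but stays invalid on a
   plain combined parallel-for.  Compile with -fopenmp.  */
extern int a[256];

void
accepted_after_fix (void)
{
#pragma omp target parallel for nowait map(tofrom: a)
  for (int i = 0; i < 256; i++)
    a[i] = i;
}

#if 0
void
still_rejected (void)
{
#pragma omp parallel for nowait  /* nowait is not allowed here */
  for (int i = 0; i < 256; i++)
    a[i] = i;
}
#endif
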
Index: gcc/c/c-typeck.c
===================================================================
--- a/src/gcc/c/c-typeck.c (.../tags/gcc_6_1_0_release)
@@ -3246,7 +3347,7 @@ Index: gcc/DATESTAMP
+++ b/src/gcc/DATESTAMP (.../branches/gcc-6-branch)
@@ -1 +1 @@
-20160427
-+20160528
++20160602
Index: gcc/tree.h
===================================================================
--- a/src/gcc/tree.h (.../tags/gcc_6_1_0_release)
@@ -3273,7 +3374,22 @@ Index: gcc/fold-const.c
===================================================================
--- a/src/gcc/fold-const.c (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/fold-const.c (.../branches/gcc-6-branch)
-@@ -836,11 +836,10 @@
+@@ -117,14 +117,8 @@
+ static int operand_equal_for_comparison_p (tree, tree, tree);
+ static int twoval_comparison_p (tree, tree *, tree *, int *);
+ static tree eval_subst (location_t, tree, tree, tree, tree, tree);
+-static tree make_bit_field_ref (location_t, tree, tree,
+- HOST_WIDE_INT, HOST_WIDE_INT, int, int);
+ static tree optimize_bit_field_compare (location_t, enum tree_code,
+ tree, tree, tree);
+-static tree decode_field_reference (location_t, tree, HOST_WIDE_INT *,
+- HOST_WIDE_INT *,
+- machine_mode *, int *, int *, int *,
+- tree *, tree *);
+ static int simple_operand_p (const_tree);
+ static bool simple_operand_p_2 (tree);
+ static tree range_binop (enum tree_code, tree, tree, int, tree, int);
+@@ -836,11 +830,10 @@
*minus_litp = *litp, *litp = 0;
if (neg_conp_p)
*conp = negate_expr (*conp);
@@ -3288,7 +3404,7 @@ Index: gcc/fold-const.c
var = negate_expr (var);
}
}
-@@ -863,10 +862,12 @@
+@@ -863,10 +856,12 @@
else if (*minus_litp)
*litp = *minus_litp, *minus_litp = 0;
*conp = negate_expr (*conp);
@@ -3305,7 +3421,148 @@ Index: gcc/fold-const.c
}
return var;
-@@ -11631,9 +11632,9 @@
+@@ -3781,15 +3776,23 @@
+
+ /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
+ starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero
+- and uses reverse storage order if REVERSEP is nonzero. */
++ and uses reverse storage order if REVERSEP is nonzero. ORIG_INNER
++ is the original memory reference used to preserve the alias set of
++ the access. */
+
+ static tree
+-make_bit_field_ref (location_t loc, tree inner, tree type,
++make_bit_field_ref (location_t loc, tree inner, tree orig_inner, tree type,
+ HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
+ int unsignedp, int reversep)
+ {
+ tree result, bftype;
+
++ if (get_alias_set (inner) != get_alias_set (orig_inner))
++ inner = fold_build2 (MEM_REF, TREE_TYPE (inner),
++ build_fold_addr_expr (inner),
++ build_int_cst
++ (reference_alias_ptr_type (orig_inner), 0));
++
+ if (bitpos == 0 && !reversep)
+ {
+ tree size = TYPE_SIZE (TREE_TYPE (inner));
+@@ -3915,13 +3918,13 @@
+ and return. */
+ return fold_build2_loc (loc, code, compare_type,
+ fold_build2_loc (loc, BIT_AND_EXPR, unsigned_type,
+- make_bit_field_ref (loc, linner,
++ make_bit_field_ref (loc, linner, lhs,
+ unsigned_type,
+ nbitsize, nbitpos,
+ 1, lreversep),
+ mask),
+ fold_build2_loc (loc, BIT_AND_EXPR, unsigned_type,
+- make_bit_field_ref (loc, rinner,
++ make_bit_field_ref (loc, rinner, rhs,
+ unsigned_type,
+ nbitsize, nbitpos,
+ 1, rreversep),
+@@ -3966,8 +3969,8 @@
+ /* Make a new bitfield reference, shift the constant over the
+ appropriate number of bits and mask it with the computed mask
+ (in case this was a signed field). If we changed it, make a new one. */
+- lhs = make_bit_field_ref (loc, linner, unsigned_type, nbitsize, nbitpos, 1,
+- lreversep);
++ lhs = make_bit_field_ref (loc, linner, lhs, unsigned_type,
++ nbitsize, nbitpos, 1, lreversep);
+
+ rhs = const_binop (BIT_AND_EXPR,
+ const_binop (LSHIFT_EXPR,
+@@ -4006,11 +4009,12 @@
+ do anything with. */
+
+ static tree
+-decode_field_reference (location_t loc, tree exp, HOST_WIDE_INT *pbitsize,
++decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
+ HOST_WIDE_INT *pbitpos, machine_mode *pmode,
+ int *punsignedp, int *preversep, int *pvolatilep,
+ tree *pmask, tree *pand_mask)
+ {
++ tree exp = *exp_;
+ tree outer_type = 0;
+ tree and_mask = 0;
+ tree mask, inner, offset;
+@@ -4047,6 +4051,8 @@
+ || TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ return 0;
+
++ *exp_ = exp;
++
+ /* If the number of bits in the reference is the same as the bitsize of
+ the outer type, then the outer type gives the signedness. Otherwise
+ (in case of a small bitfield) the signedness is unchanged. */
+@@ -5655,19 +5661,19 @@
+
+ ll_reversep = lr_reversep = rl_reversep = rr_reversep = 0;
+ volatilep = 0;
+- ll_inner = decode_field_reference (loc, ll_arg,
++ ll_inner = decode_field_reference (loc, &ll_arg,
+ &ll_bitsize, &ll_bitpos, &ll_mode,
+ &ll_unsignedp, &ll_reversep, &volatilep,
+ &ll_mask, &ll_and_mask);
+- lr_inner = decode_field_reference (loc, lr_arg,
++ lr_inner = decode_field_reference (loc, &lr_arg,
+ &lr_bitsize, &lr_bitpos, &lr_mode,
+ &lr_unsignedp, &lr_reversep, &volatilep,
+ &lr_mask, &lr_and_mask);
+- rl_inner = decode_field_reference (loc, rl_arg,
++ rl_inner = decode_field_reference (loc, &rl_arg,
+ &rl_bitsize, &rl_bitpos, &rl_mode,
+ &rl_unsignedp, &rl_reversep, &volatilep,
+ &rl_mask, &rl_and_mask);
+- rr_inner = decode_field_reference (loc, rr_arg,
++ rr_inner = decode_field_reference (loc, &rr_arg,
+ &rr_bitsize, &rr_bitpos, &rr_mode,
+ &rr_unsignedp, &rr_reversep, &volatilep,
+ &rr_mask, &rr_and_mask);
+@@ -5829,12 +5835,14 @@
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask);
+ if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
+ {
+- lhs = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
++ lhs = make_bit_field_ref (loc, ll_inner, ll_arg,
++ lntype, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp, ll_reversep);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ lhs = build2 (BIT_AND_EXPR, lntype, lhs, ll_mask);
+
+- rhs = make_bit_field_ref (loc, lr_inner, rntype, rnbitsize, rnbitpos,
++ rhs = make_bit_field_ref (loc, lr_inner, lr_arg,
++ rntype, rnbitsize, rnbitpos,
+ lr_unsignedp || rr_unsignedp, lr_reversep);
+ if (! all_ones_mask_p (lr_mask, rnbitsize))
+ rhs = build2 (BIT_AND_EXPR, rntype, rhs, lr_mask);
+@@ -5856,11 +5864,11 @@
+ {
+ tree type;
+
+- lhs = make_bit_field_ref (loc, ll_inner, lntype,
++ lhs = make_bit_field_ref (loc, ll_inner, ll_arg, lntype,
+ ll_bitsize + rl_bitsize,
+ MIN (ll_bitpos, rl_bitpos),
+ ll_unsignedp, ll_reversep);
+- rhs = make_bit_field_ref (loc, lr_inner, rntype,
++ rhs = make_bit_field_ref (loc, lr_inner, lr_arg, rntype,
+ lr_bitsize + rr_bitsize,
+ MIN (lr_bitpos, rr_bitpos),
+ lr_unsignedp, lr_reversep);
+@@ -5925,7 +5933,8 @@
+ reference we will make. Unless the mask is all ones the width of
+ that field, perform the mask operation. Then compare with the
+ merged constant. */
+- result = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
++ result = make_bit_field_ref (loc, ll_inner, ll_arg,
++ lntype, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp, ll_reversep);
+
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
+@@ -11631,9 +11640,9 @@
/* Convert A ? 0 : 1 to !A. This prefers the use of NOT_EXPR
over COND_EXPR in cases such as floating point comparisons. */
if (integer_zerop (op1)
@@ -3318,7 +3575,25 @@ Index: gcc/fold-const.c
&& truth_value_p (TREE_CODE (arg0)))
return pedantic_non_lvalue_loc (loc,
fold_convert_loc (loc, type,
-@@ -13549,6 +13550,9 @@
+@@ -12305,7 +12314,8 @@
+ || TYPE_REFERENCE_TO (expr)
+ || TYPE_CACHED_VALUES_P (expr)
+ || TYPE_CONTAINS_PLACEHOLDER_INTERNAL (expr)
+- || TYPE_NEXT_VARIANT (expr)))
++ || TYPE_NEXT_VARIANT (expr)
++ || TYPE_ALIAS_SET_KNOWN_P (expr)))
+ {
+ /* Allow these fields to be modified. */
+ tree tmp;
+@@ -12315,6 +12325,7 @@
+ TYPE_POINTER_TO (tmp) = NULL;
+ TYPE_REFERENCE_TO (tmp) = NULL;
+ TYPE_NEXT_VARIANT (tmp) = NULL;
++ TYPE_ALIAS_SET (tmp) = -1;
+ if (TYPE_CACHED_VALUES_P (tmp))
+ {
+ TYPE_CACHED_VALUES_P (tmp) = 0;
+@@ -13549,6 +13560,9 @@
if (!DECL_P (base))
base = get_base_address (base);
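The fold-const.c changes above thread the original reference through make_bit_field_ref and decode_field_reference so that the synthesized wider access is wrapped in a MEM_REF carrying the alias set of the original reference (PR middle-end/71002). A rough sketch of the kind of source that reaches this folding, assuming the two adjacent narrow fields get merged into a single wider bit-field load (illustrative, not the PR testcase):

/* Illustrative sketch, not the PR 71002 testcase: fold_truth_andor_1 /
   optimize_bit_field_compare may merge the two adjacent byte loads into
   one wider bit-field read; with the change above that read keeps the
   alias set of the original p->a / p->b accesses instead of getting a
   fresh one.  */
struct pair
{
  unsigned char a;
  unsigned char b;
};

int
both_zero (const struct pair *p)
{
  return p->a == 0 && p->b == 0;
}
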
@@ -3346,7 +3621,352 @@ Index: gcc/ChangeLog
===================================================================
--- a/src/gcc/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,439 @@
+@@ -1,3 +1,784 @@
++2016-06-02 Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ Back port from trunk
++ 2016-05-19 Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ * config/rs6000/altivec.md (UNSPEC_DARN): New unspec constant.
++ (UNSPEC_DARN_32): New unspec constant.
++ (UNSPEC_DARN_RAW): New unspec constant.
++ (darn_32): New instruction.
++ (darn_raw): New instruction.
++ (darn): New instruction.
++ * config/rs6000/rs6000-builtin.def (RS6000_BUILTIN_0): Add
++ support and documentation for this macro.
++ (BU_P9_MISC_1): New macro definition.
++ (BU_P9_64BIT_MISC_0): New macro definition.
++ (BU_P9_MISC_0): New macro definition.
++ (darn_32): New builtin definition.
++ (darn_raw): New builtin definition.
++ (darn): New builtin definition.
++ * config/rs6000/rs6000.c: Add #define RS6000_BUILTIN_0 and #undef
++ RS6000_BUILTIN_0 directives to surround each occurrence of
++ #include "rs6000-builtin.def".
++ (rs6000_builtin_mask_calculate): Add in the RS6000_BTM_MODULO and
++ RS6000_BTM_64BIT flags to the returned mask, depending on
++ configuration.
++ (def_builtin): Correct an error in the assignments made to the
++ debugging variable attr_string.
++ (rs6000_expand_builtin): Add support for no-operand built-in
++ functions.
++ (builtin_function_type): Remove fatal_error assertion that is no
++ longer valid.
++ (rs6000_common_init_builtins): Add support for no-operand built-in
++ functions.
++ * config/rs6000/rs6000.h (RS6000_BTM_MODULO): New macro
++ definition.
++ (RS6000_BTM_PURE): Enhance comment to clarify intent of this flag
++ definition.
++ (RS6000_BTM_64BIT): New macro definition.
++ * doc/extend.texi: Document __builtin_darn (void),
++ __builtin_darn_raw (void), and __builtin_darn_32 (void) built-in
++ functions.
++
++2016-06-01 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ Back port from trunk
++ 2016-05-23 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ PR target/71201
++ * config/rs6000/altivec.md (altivec_vperm_<mode>_internal): Drop
++ ISA 3.0 xxperm fusion alternative.
++ (altivec_vperm_v8hiv16qi): Likewise.
++ (altivec_vperm_<mode>_uns_internal): Likewise.
++ (vperm_v8hiv4si): Likewise.
++ (vperm_v16qiv8hi): Likewise.
++
++ Back port from trunk
++ 2016-05-23 Michael Meissner <meissner at linux.vnet.ibm.com>
++ Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ * config/rs6000/rs6000.c (rs6000_expand_vector_set): Generate
++ vpermr/xxpermr on ISA 3.0.
++ (altivec_expand_vec_perm_le): Likewise.
++ * config/rs6000/altivec.md (UNSPEC_VPERMR): New unspec.
++ (altivec_vpermr_<mode>_internal): Add VPERMR/XXPERMR support for
++ ISA 3.0.
++
++ Back port from trunk
++ 2016-05-24 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * config/rs6000/altivec.md (VParity): New mode iterator for vector
++ parity built-in functions.
++ (p9v_ctz<mode>2): Add support for ISA 3.0 vector count trailing
++ zeros.
++ (p9v_parity<mode>2): Likewise.
++ * config/rs6000/vector.md (VEC_IP): New mode iterator for vector
++ parity.
++ (ctz<mode>2): ISA 3.0 expander for vector count trailing zeros.
++ (parity<mode>2): ISA 3.0 expander for vector parity.
++ * config/rs6000/rs6000-builtin.def (BU_P9_MISC_1): New macros for
++ power9 built-ins.
++ (BU_P9_64BIT_MISC_0): Likewise.
++ (BU_P9_MISC_0): Likewise.
++ (BU_P9V_AV_1): Likewise.
++ (BU_P9V_AV_2): Likewise.
++ (BU_P9V_AV_3): Likewise.
++ (BU_P9V_AV_P): Likewise.
++ (BU_P9V_VSX_1): Likewise.
++ (BU_P9V_OVERLOAD_1): Likewise.
++ (BU_P9V_OVERLOAD_2): Likewise.
++ (BU_P9V_OVERLOAD_3): Likewise.
++ (VCTZB): Add vector count trailing zeros support.
++ (VCTZH): Likewise.
++ (VCTZW): Likewise.
++ (VCTZD): Likewise.
++ (VPRTYBD): Add vector parity support.
++ (VPRTYBQ): Likewise.
++ (VPRTYBW): Likewise.
++ (VCTZ): Add overloaded vector count trailing zeros support.
++ (VPRTYB): Add overloaded vector parity support.
++ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
++ overloaded vector count trailing zeros and parity instructions.
++ * config/rs6000/rs6000.md (wd mode attribute): Add V1TI and TI for
++ vector parity support.
++ * config/rs6000/altivec.h (vec_vctz): Add ISA 3.0 vector count
++ trailing zeros support.
++ (vec_cntlz): Likewise.
++ (vec_vctzb): Likewise.
++ (vec_vctzd): Likewise.
++ (vec_vctzh): Likewise.
++ (vec_vctzw): Likewise.
++ (vec_vprtyb): Add ISA 3.0 vector parity support.
++ (vec_vprtybd): Likewise.
++ (vec_vprtybw): Likewise.
++ (vec_vprtybq): Likewise.
++ * doc/extend.texi (PowerPC AltiVec Built-in Functions): Document
++ the ISA 3.0 vector count trailing zeros and vector parity built-in
++ functions.
++
++ Back port from trunk
++ 2016-05-24 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * config/rs6000/altivec.md (VNEG iterator): New iterator for
++ VNEGW/VNEGD instructions.
++ (p9_neg<mode>2): New insns for ISA 3.0 VNEGW/VNEGD.
++ (neg<mode>2): Add expander for V2DImode added in ISA 2.07, and
++ support for ISA 3.0 VNEGW/VNEGD instructions.
++
++ Back port from trunk
++ 2016-05-11 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * config/rs6000/predicates.md (quad_memory_operand): Move most of
++ the code into quad_address_p and call it to share code with
++ vsx_quad_dform_memory_operand.
++ (vsx_quad_dform_memory_operand): New predicate for ISA 3.0 vector
++ d-form support.
++ * config/rs6000/rs6000.opt (-mlra): Switch to being an option mask
++ bit instead of being a separate word. Split -mpower9-dform into
++ two switches, -mpower9-dform-scalar and -mpower9-dform-vector.
++ * config/rs6000/rs6000.c (RELOAD_REG_QUAD_OFFSET): New addr_mask
++ for the register class supporting 128-bit quad word memory offsets.
++ (mode_supports_vsx_dform_quad): Helper function to return if the
++ register class uses quad word memory offsets.
++ (rs6000_debug_addr_mask): Add support for quad word memory offsets.
++ (rs6000_debug_reg_global): Always print if we are using LRA or not.
++ (rs6000_setup_reg_addr_masks): If ISA 3.0 vector d-form
++ instructions are enabled, set up the appropriate addr_masks for
++ 128-bit types.
++ (rs6000_init_hard_regno_mode_ok): wb constraint is now based on
++ -mpower9-dform-scalar, instead of -mpower9-dform.
++ (rs6000_option_override_internal): Split -mpower9-dform into two
++ switches, -mpower9-dform-scalar and -mpower9-dform-vector. The
++ -mpower9-dform switch sets or clears both. If we are not using
++ the LRA register allocator, do not enable -mpower9-dform-vector by
++ default. If we are using LRA, enable -mpower9-dform-vector and
++ -mvsx-timode if it is appropriate. Issue a warning if either
++ -mpower9-dform-vector or -mvsx-timode are explicitly used without
++ enabling LRA.
++ (quad_address_offset_p): New helper function to return if the
++ offset is legal for quad word memory instructions.
++ (quad_address_p): New function to determine if GPR or vector
++ register quad word memory addresses are legal.
++ (mem_operand_gpr): Validate quad word address offsets.
++ (reg_offset_addressing_ok_p): Add support for ISA 3.0 vector
++ d-form (register + offset) instructions.
++ (offsettable_ok_by_alignment): Likewise.
++ (rs6000_legitimate_offset_address_p): Likewise.
++ (legitimate_lo_sum_address_p): Likewise.
++ (rs6000_legitimize_address): Likewise.
++ (rs6000_legitimize_reload_address): Add more debug statements for
++ -mdebug=addr.
++ (rs6000_legitimate_address_p): Add support for ISA 3.0 vector
++ d-form instructions.
++ (rs6000_secondary_reload_memory): Add support for ISA 3.0 vector
++ d-form instructions. Distinguish different cases in debug
++ output.
++ (rs6000_secondary_reload_inner): Add support for ISA 3.0 vector
++ d-form instructions.
++ (rs6000_preferred_reload_class): Likewise.
++ (rs6000_output_move_128bit): Add support for ISA 3.0 d-form
++ instructions. If ISA 3.0 is available, generate lxvx/stxvx instead
++ of the ISA 2.06 indexed memory instructions.
++ (rs6000_emit_prologue): If we have ISA 3.0 d-form instructions,
++ use them to save/restore the saved vector registers instead of
++ using Altivec instructions.
++ (rs6000_emit_epilogue): Likewise.
++ (rs6000_lra_p): Use TARGET_LRA instead of the old option word.
++ (rs6000_opt_masks): Split -mpower9-dform into
++ -mpower9-dform-scalar and -mpower9-dform-vector.
++ (rs6000_print_options_internal): Print -mno-<switch> if <switch>
++ was not selected.
++ * config/rs6000/vsx.md (p9_vecload_<mode>): Delete hack to emit
++ ISA 3.0 vector indexed memory instructions, and fold the code into
++ the normal mov<mode> patterns.
++ (p9_vecstore_<mode>): Likewise.
++ (vsx_mov<mode>): Add support for ISA 3.0 vector d-form
++ instructions.
++ (vsx_movti_64bit): Likewise.
++ (vsx_movti_32bit): Likewise.
++ * config/rs6000/constraints.md (wO constraint): New constraint for
++ ISA 3.0 vector d-form support.
++ * config/rs6000/rs6000-cpus.def (ISA_3_0_MASKS_SERVER): Use
++ -mpower9-dform-scalar instead of -mpower9-dform. Add note not to
++ include -mpower9-dform-vector until we switch over to LRA.
++ (POWERPC_MASKS): Add -mlra. Split -mpower9-dform into two
++ switches, -mpower9-dform-scalar and -mpower9-dform-vector.
++ * config/rs6000/rs6000-protos.h (quad_address_p): Add declaration.
++ * doc/invoke.texi (RS/6000 and PowerPC Options): Add documentation
++ for -mpower9-dform and -mlra.
++ * doc/md.texi (wO constraint): Document wO constraint.
++
++2016-06-01 Eric Botcazou <ebotcazou at adacore.com>
++
++ * tree-vect-loop.c (vect_determine_vectorization_factor): Also compute
++ the factor for live Phi nodes.
++
++2016-06-01 Jakub Jelinek <jakub at redhat.com>
++
++ PR middle-end/71371
++ * gimplify.c (gimplify_omp_for): Temporarily clear gimplify_omp_ctxp
++ around creation of the temporary.
++
++2016-06-01 Eric Botcazou <ebotcazou at adacore.com>
++
++ * tree-vect-loop.c (vect_determine_vectorization_factor): Also take
++ into account live statements for mask producers.
++
++2016-05-31 Richard Biener <rguenther at suse.de>
++
++ Backport from mainline
++ 2016-05-11 Richard Biener <rguenther at suse.de>
++
++ PR debug/71057
++ * dwarf2out.c (retry_incomplete_types): Set early_dwarf.
++ (dwarf2out_finish): Move retry_incomplete_types call ...
++ (dwarf2out_early_finish): ... here.
++
++2016-05-31 Kyrylo Tkachov <kyrylo.tkachov at arm.com>
++
++ PR target/71056
++ * config/arm/arm-builtins.c (arm_builtin_vectorized_function): Return
++ NULL_TREE early if NEON is not available. Remove now redundant check
++ in ARM_CHECK_BUILTIN_MODE.
++
++2016-05-31 Tom de Vries <tom at codesourcery.com>
++
++ backport:
++ 2016-05-31 Tom de Vries <tom at codesourcery.com>
++
++ PR tree-optimization/69068
++ * graphite-isl-ast-to-gimple.c (copy_bb_and_scalar_dependences): Handle
++ phis with more than two args.
++
++2016-05-30 Andreas Tobler <andreast at gcc.gnu.org>
++
++ Backport from mainline
++ 2016-05-30 Andreas Tobler <andreast at gcc.gnu.org>
++
++ * config.gcc: Move hard float support for arm*hf*-*-freebsd* into
++ armv6*-*-freebsd* for FreeBSD 11. Eliminate the arm*hf*-*-freebsd*
++ target.
++
++2016-05-30 Bill Schmidt <wschmidt at linux.vnet.ibm.com>
++
++ Backport from mainline
++ 2016-04-29 Bill Schmidt <wschmidt at linux.vnet.ibm.com>
++
++ * config/rs6000/altivec.h: Change definitions of vec_xl and
++ vec_xst.
++ * config/rs6000/rs6000-builtin.def (LD_ELEMREV_V2DF): New.
++ (LD_ELEMREV_V2DI): New.
++ (LD_ELEMREV_V4SF): New.
++ (LD_ELEMREV_V4SI): New.
++ (LD_ELEMREV_V8HI): New.
++ (LD_ELEMREV_V16QI): New.
++ (ST_ELEMREV_V2DF): New.
++ (ST_ELEMREV_V2DI): New.
++ (ST_ELEMREV_V4SF): New.
++ (ST_ELEMREV_V4SI): New.
++ (ST_ELEMREV_V8HI): New.
++ (ST_ELEMREV_V16QI): New.
++ (XL): New.
++ (XST): New.
++ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
++ descriptions for VSX_BUILTIN_VEC_XL and VSX_BUILTIN_VEC_XST.
++ * config/rs6000/rs6000.c (rs6000_builtin_mask_calculate): Map from
++ TARGET_P9_VECTOR to RS6000_BTM_P9_VECTOR.
++ (altivec_expand_builtin): Add handling for
++ VSX_BUILTIN_ST_ELEMREV_<MODE> and VSX_BUILTIN_LD_ELEMREV_<MODE>.
++ (rs6000_invalid_builtin): Add error-checking for
++ RS6000_BTM_P9_VECTOR.
++ (altivec_init_builtins): Define builtins used to implement vec_xl
++ and vec_xst.
++ (rs6000_builtin_mask_names): Define power9-vector.
++ * config/rs6000/rs6000.h (MASK_P9_VECTOR): Define.
++ (RS6000_BTM_P9_VECTOR): Define.
++ (RS6000_BTM_COMMON): Include RS6000_BTM_P9_VECTOR.
++ * config/rs6000/vsx.md (vsx_ld_elemrev_v2di): New define_insn.
++ (vsx_ld_elemrev_v2df): Likewise.
++ (vsx_ld_elemrev_v4sf): Likewise.
++ (vsx_ld_elemrev_v4si): Likewise.
++ (vsx_ld_elemrev_v8hi): Likewise.
++ (vsx_ld_elemrev_v16qi): Likewise.
++ (vsx_st_elemrev_v2df): Likewise.
++ (vsx_st_elemrev_v2di): Likewise.
++ (vsx_st_elemrev_v4sf): Likewise.
++ (vsx_st_elemrev_v4si): Likewise.
++ (vsx_st_elemrev_v8hi): Likewise.
++ (vsx_st_elemrev_v16qi): Likewise.
++ * doc/extend.texi: Add prototypes for vec_xl and vec_xst. Correct
++ grammar.
++
++2016-05-30 Richard Biener <rguenther at suse.de>
++
++ Backport from mainline
++ 2016-05-11 Richard Biener <rguenther at suse.de>
++
++ PR middle-end/71002
++ * alias.c (reference_alias_ptr_type): Preserve alias-set zero
++ if the langhook insists on it.
++ * fold-const.c (make_bit_field_ref): Add arg for the original
++ reference and preserve its alias-set.
++ (decode_field_reference): Take exp by reference and adjust it
++ to the original memory reference.
++ (optimize_bit_field_compare): Adjust callers.
++ (fold_truth_andor_1): Likewise.
++
++ 2016-05-13 Jakub Jelinek <jakub at redhat.com>
++
++ PR bootstrap/71071
++ * fold-const.c (fold_checksum_tree): Allow modification
++ of TYPE_ALIAS_SET during folding.
++
++2016-05-30 Eric Botcazou <ebotcazou at adacore.com>
++
++ * config/visium/visium.c (visium_split_double_add): Minor tweaks.
++ (visium_expand_copysign): Use gen_int_mode directly.
++ (visium_compute_frame_size): Minor tweaks.
++
++2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ backport:
++ 2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ PR tree-optimization/69067
++ * graphite-isl-ast-to-gimple.c (get_def_bb_for_const): Remove assert.
++
+2016-05-27 Eric Botcazou <ebotcazou at adacore.com>
+
+ * config/visium/visium-protos.h (split_double_move): Rename into...
@@ -3786,7 +4406,7 @@ Index: gcc/ChangeLog
2016-04-27 Release Manager
* GCC 6.1.0 released.
-@@ -49,7 +485,7 @@
+@@ -49,7 +830,7 @@
constant boolean.
2016-04-20 Andrew Pinski <apinski at cavium.com>
@@ -3795,6 +4415,451 @@ Index: gcc/ChangeLog
PR target/64971
* config/aarch64/aarch64.md (sibcall): Force call
+Index: gcc/testsuite/gcc.target/powerpc/vsx-elemrev-4.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-4.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-4.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,228 @@
++/* { dg-do compile { target { powerpc64-*-* } } } */
++/* { dg-skip-if "do not override mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O0" } */
++/* { dg-final { scan-assembler-times "lxvx" 40 } } */
++/* { dg-final { scan-assembler-times "stxvx" 40 } } */
++
++#include <altivec.h>
++
++extern vector double vd, *vdp;
++extern vector signed long long vsll, *vsllp;
++extern vector unsigned long long vull, *vullp;
++extern vector float vf, *vfp;
++extern vector signed int vsi, *vsip;
++extern vector unsigned int vui, *vuip;
++extern vector signed short vss, *vssp;
++extern vector unsigned short vus, *vusp;
++extern vector signed char vsc, *vscp;
++extern vector unsigned char vuc, *vucp;
++extern double *dp;
++extern signed long long *sllp;
++extern unsigned long long *ullp;
++extern float *fp;
++extern signed int *sip;
++extern unsigned int *uip;
++extern signed short *ssp;
++extern unsigned short *usp;
++extern signed char *scp;
++extern unsigned char *ucp;
++
++void foo0 (void)
++{
++ vd = vec_xl (0, vdp);
++}
++
++void foo1 (void)
++{
++ vsll = vec_xl (0, vsllp);
++}
++
++void foo2 (void)
++{
++ vull = vec_xl (0, vullp);
++}
++
++void foo3 (void)
++{
++ vf = vec_xl (0, vfp);
++}
++
++void foo4 (void)
++{
++ vsi = vec_xl (0, vsip);
++}
++
++void foo5 (void)
++{
++ vui = vec_xl (0, vuip);
++}
++
++void foo6 (void)
++{
++ vss = vec_xl (0, vssp);
++}
++
++void foo7 (void)
++{
++ vus = vec_xl (0, vusp);
++}
++
++void foo8 (void)
++{
++ vsc = vec_xl (0, vscp);
++}
++
++void foo9 (void)
++{
++ vuc = vec_xl (0, vucp);
++}
++
++void foo10 (void)
++{
++ vec_xst (vd, 0, vdp);
++}
++
++void foo11 (void)
++{
++ vec_xst (vsll, 0, vsllp);
++}
++
++void foo12 (void)
++{
++ vec_xst (vull, 0, vullp);
++}
++
++void foo13 (void)
++{
++ vec_xst (vf, 0, vfp);
++}
++
++void foo14 (void)
++{
++ vec_xst (vsi, 0, vsip);
++}
++
++void foo15 (void)
++{
++ vec_xst (vui, 0, vuip);
++}
++
++void foo16 (void)
++{
++ vec_xst (vss, 0, vssp);
++}
++
++void foo17 (void)
++{
++ vec_xst (vus, 0, vusp);
++}
++
++void foo18 (void)
++{
++ vec_xst (vsc, 0, vscp);
++}
++
++void foo19 (void)
++{
++ vec_xst (vuc, 0, vucp);
++}
++
++void foo20 (void)
++{
++ vd = vec_xl (0, dp);
++}
++
++void foo21 (void)
++{
++ vsll = vec_xl (0, sllp);
++}
++
++void foo22 (void)
++{
++ vull = vec_xl (0, ullp);
++}
++
++void foo23 (void)
++{
++ vf = vec_xl (0, fp);
++}
++
++void foo24 (void)
++{
++ vsi = vec_xl (0, sip);
++}
++
++void foo25 (void)
++{
++ vui = vec_xl (0, uip);
++}
++
++void foo26 (void)
++{
++ vss = vec_xl (0, ssp);
++}
++
++void foo27 (void)
++{
++ vus = vec_xl (0, usp);
++}
++
++void foo28 (void)
++{
++ vsc = vec_xl (0, scp);
++}
++
++void foo29 (void)
++{
++ vuc = vec_xl (0, ucp);
++}
++
++void foo30 (void)
++{
++ vec_xst (vd, 0, dp);
++}
++
++void foo31 (void)
++{
++ vec_xst (vsll, 0, sllp);
++}
++
++void foo32 (void)
++{
++ vec_xst (vull, 0, ullp);
++}
++
++void foo33 (void)
++{
++ vec_xst (vf, 0, fp);
++}
++
++void foo34 (void)
++{
++ vec_xst (vsi, 0, sip);
++}
++
++void foo35 (void)
++{
++ vec_xst (vui, 0, uip);
++}
++
++void foo36 (void)
++{
++ vec_xst (vss, 0, ssp);
++}
++
++void foo37 (void)
++{
++ vec_xst (vus, 0, usp);
++}
++
++void foo38 (void)
++{
++ vec_xst (vsc, 0, scp);
++}
++
++void foo39 (void)
++{
++ vec_xst (vuc, 0, ucp);
++}
+Index: gcc/testsuite/gcc.target/powerpc/dform-1.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/dform-1.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/dform-1.c (.../branches/gcc-6-branch)
+@@ -1,7 +1,7 @@
+ /* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+ /* { dg-require-effective-target powerpc_p9vector_ok } */
+ /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+-/* { dg-options "-mcpu=power9 -mpower9-dform -O2" } */
++/* { dg-options "-mcpu=power9 -mpower9-dform -O2 -mlra" } */
+
+ #ifndef TYPE
+ #define TYPE double
+Index: gcc/testsuite/gcc.target/powerpc/darn-2.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/darn-2.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/darn-2.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-options "-mcpu=power9" } */
++/* { dg-require-effective-target lp64 } */
++
++#include <altivec.h>
++
++long long get_raw_random ()
++{
++ return __builtin_darn_raw ();
++}
++
++/* { dg-final { scan-assembler "darn" } } */
+Index: gcc/testsuite/gcc.target/powerpc/p9-vparity.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/p9-vparity.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/p9-vparity.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,107 @@
++/* { dg-do compile { target { powerpc64*-*-* && lp64 } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_p9vector_ok } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O2 -mlra -mvsx-timode" } */
++
++#include <altivec.h>
++
++vector int
++parity_v4si_1s (vector int a)
++{
++ return vec_vprtyb (a);
++}
++
++vector int
++parity_v4si_2s (vector int a)
++{
++ return vec_vprtybw (a);
++}
++
++vector unsigned int
++parity_v4si_1u (vector unsigned int a)
++{
++ return vec_vprtyb (a);
++}
++
++vector unsigned int
++parity_v4si_2u (vector unsigned int a)
++{
++ return vec_vprtybw (a);
++}
++
++vector long long
++parity_v2di_1s (vector long long a)
++{
++ return vec_vprtyb (a);
++}
++
++vector long long
++parity_v2di_2s (vector long long a)
++{
++ return vec_vprtybd (a);
++}
++
++vector unsigned long long
++parity_v2di_1u (vector unsigned long long a)
++{
++ return vec_vprtyb (a);
++}
++
++vector unsigned long long
++parity_v2di_2u (vector unsigned long long a)
++{
++ return vec_vprtybd (a);
++}
++
++vector __int128_t
++parity_v1ti_1s (vector __int128_t a)
++{
++ return vec_vprtyb (a);
++}
++
++vector __int128_t
++parity_v1ti_2s (vector __int128_t a)
++{
++ return vec_vprtybq (a);
++}
++
++__int128_t
++parity_ti_3s (__int128_t a)
++{
++ return vec_vprtyb (a);
++}
++
++__int128_t
++parity_ti_4s (__int128_t a)
++{
++ return vec_vprtybq (a);
++}
++
++vector __uint128_t
++parity_v1ti_1u (vector __uint128_t a)
++{
++ return vec_vprtyb (a);
++}
++
++vector __uint128_t
++parity_v1ti_2u (vector __uint128_t a)
++{
++ return vec_vprtybq (a);
++}
++
++__uint128_t
++parity_ti_3u (__uint128_t a)
++{
++ return vec_vprtyb (a);
++}
++
++__uint128_t
++parity_ti_4u (__uint128_t a)
++{
++ return vec_vprtybq (a);
++}
++
++/* { dg-final { scan-assembler "vprtybd" } } */
++/* { dg-final { scan-assembler "vprtybq" } } */
++/* { dg-final { scan-assembler "vprtybw" } } */
+Index: gcc/testsuite/gcc.target/powerpc/p8vector-int128-1.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/p8vector-int128-1.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/p8vector-int128-1.c (.../branches/gcc-6-branch)
+@@ -2,7 +2,7 @@
+ /* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+ /* { dg-require-effective-target powerpc_p8vector_ok } */
+ /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+-/* { dg-options "-mcpu=power8 -O3 -mvsx-timode" } */
++/* { dg-options "-mcpu=power8 -O3 -mvsx-timode -mlra" } */
+
+ #include <altivec.h>
+
+Index: gcc/testsuite/gcc.target/powerpc/dform-2.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/dform-2.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/dform-2.c (.../branches/gcc-6-branch)
+@@ -1,7 +1,7 @@
+ /* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+ /* { dg-require-effective-target powerpc_p9vector_ok } */
+ /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+-/* { dg-options "-mcpu=power9 -mpower9-dform -O2" } */
++/* { dg-options "-mcpu=power9 -mpower9-dform -O2 -mlra" } */
+
+ #ifndef TYPE
+ #define TYPE float
+Index: gcc/testsuite/gcc.target/powerpc/dform-3.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/dform-3.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/dform-3.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,39 @@
++/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
++/* { dg-require-effective-target powerpc_p9vector_ok } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -mpower9-dform -O2 -mlra" } */
++
++#ifndef TYPE
++#define TYPE vector double
++#endif
++
++struct foo {
++ TYPE a, b, c, d;
++};
++
++/* Test whether ISA 3.0 vector d-form instructions are implemented. */
++void
++add (struct foo *p)
++{
++ p->b = p->c + p->d;
++}
++
++/* Make sure we don't use direct moves to get stuff into GPR registers. */
++void
++gpr (struct foo *p)
++{
++ TYPE x = p->c;
++
++ __asm__ (" # reg = %0" : "+r" (x));
++
++ p->b = x;
++}
++
++/* { dg-final { scan-assembler "lxv " } } */
++/* { dg-final { scan-assembler "stxv " } } */
++/* { dg-final { scan-assembler-not "lxvx " } } */
++/* { dg-final { scan-assembler-not "stxvx " } } */
++/* { dg-final { scan-assembler-not "mfvsrd " } } */
++/* { dg-final { scan-assembler-not "mfvsrld " } } */
++/* { dg-final { scan-assembler "l\[dq\] " } } */
++/* { dg-final { scan-assembler "st\[dq\] " } } */
Index: gcc/testsuite/gcc.target/powerpc/pr70963.c
===================================================================
--- a/src/gcc/testsuite/gcc.target/powerpc/pr70963.c (.../tags/gcc_6_1_0_release)
@@ -3844,6 +4909,828 @@ Index: gcc/testsuite/gcc.target/powerpc/pr70963.c
+ check (vec_all_eq (c, y), "xvcvdpuxds");
+ check (vec_all_eq (d, x), "vec_ctf unsigned");
+}
+Index: gcc/testsuite/gcc.target/powerpc/p9-vneg.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/p9-vneg.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/p9-vneg.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { powerpc64*-*-* } } } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-require-effective-target powerpc_p9vector_ok } */
++/* { dg-options "-mcpu=power9 -O2" } */
++
++/* Verify P9 vector negate instructions. */
++
++vector long long v2di_neg (vector long long a) { return -a; }
++vector int v4si_neg (vector int a) { return -a; }
++
++/* { dg-final { scan-assembler "vnegd" } } */
++/* { dg-final { scan-assembler "vnegw" } } */
+Index: gcc/testsuite/gcc.target/powerpc/ctz-3.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/ctz-3.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/ctz-3.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,62 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_p9vector_ok } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O2 -ftree-vectorize -fvect-cost-model=dynamic -fno-unroll-loops -fno-unroll-all-loops" } */
++
++#ifndef SIZE
++#define SIZE 1024
++#endif
++
++#ifndef ALIGN
++#define ALIGN 32
++#endif
++
++#define ALIGN_ATTR __attribute__((__aligned__(ALIGN)))
++
++#define DO_BUILTIN(PREFIX, TYPE, CTZ) \
++TYPE PREFIX ## _a[SIZE] ALIGN_ATTR; \
++TYPE PREFIX ## _b[SIZE] ALIGN_ATTR; \
++ \
++void \
++PREFIX ## _ctz (void) \
++{ \
++ unsigned long i; \
++ \
++ for (i = 0; i < SIZE; i++) \
++ PREFIX ## _a[i] = CTZ (PREFIX ## _b[i]); \
++}
++
++#if !defined(DO_LONG_LONG) && !defined(DO_LONG) && !defined(DO_INT) && !defined(DO_SHORT) && !defined(DO_CHAR)
++#define DO_INT 1
++#endif
++
++#if DO_LONG_LONG
++/* At the moment, only int is auto vectorized. */
++DO_BUILTIN (sll, long long, __builtin_ctzll)
++DO_BUILTIN (ull, unsigned long long, __builtin_ctzll)
++#endif
++
++#if defined(_ARCH_PPC64) && DO_LONG
++DO_BUILTIN (sl, long, __builtin_ctzl)
++DO_BUILTIN (ul, unsigned long, __builtin_ctzl)
++#endif
++
++#if DO_INT
++DO_BUILTIN (si, int, __builtin_ctz)
++DO_BUILTIN (ui, unsigned int, __builtin_ctz)
++#endif
++
++#if DO_SHORT
++DO_BUILTIN (ss, short, __builtin_ctz)
++DO_BUILTIN (us, unsigned short, __builtin_ctz)
++#endif
++
++#if DO_CHAR
++DO_BUILTIN (sc, signed char, __builtin_ctz)
++DO_BUILTIN (uc, unsigned char, __builtin_ctz)
++#endif
++
++/* { dg-final { scan-assembler-times "vctzw" 2 } } */
++/* { dg-final { scan-assembler-not "cnttzd" } } */
++/* { dg-final { scan-assembler-not "cnttzw" } } */
+Index: gcc/testsuite/gcc.target/powerpc/ctz-4.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/ctz-4.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/ctz-4.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,110 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
++/* { dg-require-effective-target powerpc_p9vector_ok } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O2" } */
++
++#include <altivec.h>
++
++vector signed char
++count_trailing_zeros_v16qi_1s (vector signed char a)
++{
++ return vec_vctz (a);
++}
++
++vector signed char
++count_trailing_zeros_v16qi_2s (vector signed char a)
++{
++ return vec_vctzb (a);
++}
++
++vector unsigned char
++count_trailing_zeros_v16qi_1u (vector unsigned char a)
++{
++ return vec_vctz (a);
++}
++
++vector unsigned char
++count_trailing_zeros_v16qi_2u (vector unsigned char a)
++{
++ return vec_vctzb (a);
++}
++
++vector short
++count_trailing_zeros_v8hi_1s (vector short a)
++{
++ return vec_vctz (a);
++}
++
++vector short
++count_trailing_zeros_v8hi_2s (vector short a)
++{
++ return vec_vctzh (a);
++}
++
++vector unsigned short
++count_trailing_zeros_v8hi_1u (vector unsigned short a)
++{
++ return vec_vctz (a);
++}
++
++vector unsigned short
++count_trailing_zeros_v8hi_2u (vector unsigned short a)
++{
++ return vec_vctzh (a);
++}
++
++vector int
++count_trailing_zeros_v4si_1s (vector int a)
++{
++ return vec_vctz (a);
++}
++
++vector int
++count_trailing_zeros_v4si_2s (vector int a)
++{
++ return vec_vctzw (a);
++}
++
++vector unsigned int
++count_trailing_zeros_v4si_1u (vector unsigned int a)
++{
++ return vec_vctz (a);
++}
++
++vector unsigned int
++count_trailing_zeros_v4si_2u (vector unsigned int a)
++{
++ return vec_vctzw (a);
++}
++
++vector long long
++count_trailing_zeros_v2di_1s (vector long long a)
++{
++ return vec_vctz (a);
++}
++
++vector long long
++count_trailing_zeros_v2di_2s (vector long long a)
++{
++ return vec_vctzd (a);
++}
++
++vector unsigned long long
++count_trailing_zeros_v2di_1u (vector unsigned long long a)
++{
++ return vec_vctz (a);
++}
++
++vector unsigned long long
++count_trailing_zeros_v2di_2u (vector unsigned long long a)
++{
++ return vec_vctzd (a);
++}
++
++/* { dg-final { scan-assembler "vctzb" } } */
++/* { dg-final { scan-assembler "vctzd" } } */
++/* { dg-final { scan-assembler "vctzh" } } */
++/* { dg-final { scan-assembler "vctzw" } } */
++/* { dg-final { scan-assembler-not "cnttzd" } } */
++/* { dg-final { scan-assembler-not "cnttzw" } } */
+Index: gcc/testsuite/gcc.target/powerpc/pr68805.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/pr68805.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr68805.c (.../branches/gcc-6-branch)
+@@ -1,6 +1,6 @@
+ /* { dg-do compile { target powerpc64le-*-* } } */
+ /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+-/* { dg-options "-O2 -mvsx-timode -mcpu=power8" } */
++/* { dg-options "-O2 -mvsx-timode -mcpu=power8 -mlra" } */
+
+ typedef struct bar {
+ void *a;
+Index: gcc/testsuite/gcc.target/powerpc/p9-permute.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/p9-permute.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/p9-permute.c (.../branches/gcc-6-branch)
+@@ -1,4 +1,4 @@
+-/* { dg-do compile { target { powerpc64le-*-* } } } */
++/* { dg-do compile { target { powerpc64*-*-* } } } */
+ /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+ /* { dg-options "-mcpu=power9 -O2" } */
+ /* { dg-require-effective-target powerpc_p9vector_ok } */
+@@ -17,5 +17,6 @@
+ return vec_perm (a, b, mask);
+ }
+
++/* expect xxpermr on little-endian, xxperm on big-endian */
+ /* { dg-final { scan-assembler "xxperm" } } */
+ /* { dg-final { scan-assembler-not "vperm" } } */
+Index: gcc/testsuite/gcc.target/powerpc/vsx-elemrev-1.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-1.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-1.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,143 @@
++/* { dg-do compile { target { powerpc64le*-*-* } } } */
++/* { dg-skip-if "do not override mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
++/* { dg-options "-mcpu=power8 -O0" } */
++/* { dg-final { scan-assembler-times "lxvd2x" 18 } } */
++/* { dg-final { scan-assembler-times "lxvw4x" 6 } } */
++/* { dg-final { scan-assembler-times "stxvd2x" 18 } } */
++/* { dg-final { scan-assembler-times "stxvw4x" 6 } } */
++/* { dg-final { scan-assembler-times "xxpermdi" 24 } } */
++
++#include <altivec.h>
++
++extern vector double vd, *vdp;
++extern vector signed long long vsll, *vsllp;
++extern vector unsigned long long vull, *vullp;
++extern vector float vf, *vfp;
++extern vector signed int vsi, *vsip;
++extern vector unsigned int vui, *vuip;
++extern double *dp;
++extern signed long long *sllp;
++extern unsigned long long *ullp;
++extern float *fp;
++extern signed int *sip;
++extern unsigned int *uip;
++
++void foo0 (void)
++{
++ vd = vec_xl (0, vdp);
++}
++
++void foo1 (void)
++{
++ vsll = vec_xl (0, vsllp);
++}
++
++void foo2 (void)
++{
++ vull = vec_xl (0, vullp);
++}
++
++void foo3 (void)
++{
++ vf = vec_xl (0, vfp);
++}
++
++void foo4 (void)
++{
++ vsi = vec_xl (0, vsip);
++}
++
++void foo5 (void)
++{
++ vui = vec_xl (0, vuip);
++}
++
++void foo6 (void)
++{
++ vec_xst (vd, 0, vdp);
++}
++
++void foo7 (void)
++{
++ vec_xst (vsll, 0, vsllp);
++}
++
++void foo8 (void)
++{
++ vec_xst (vull, 0, vullp);
++}
++
++void foo9 (void)
++{
++ vec_xst (vf, 0, vfp);
++}
++
++void foo10 (void)
++{
++ vec_xst (vsi, 0, vsip);
++}
++
++void foo11 (void)
++{
++ vec_xst (vui, 0, vuip);
++}
++
++void foo20 (void)
++{
++ vd = vec_xl (0, dp);
++}
++
++void foo21 (void)
++{
++ vsll = vec_xl (0, sllp);
++}
++
++void foo22 (void)
++{
++ vull = vec_xl (0, ullp);
++}
++
++void foo23 (void)
++{
++ vf = vec_xl (0, fp);
++}
++
++void foo24 (void)
++{
++ vsi = vec_xl (0, sip);
++}
++
++void foo25 (void)
++{
++ vui = vec_xl (0, uip);
++}
++
++void foo26 (void)
++{
++ vec_xst (vd, 0, dp);
++}
++
++void foo27 (void)
++{
++ vec_xst (vsll, 0, sllp);
++}
++
++void foo28 (void)
++{
++ vec_xst (vull, 0, ullp);
++}
++
++void foo29 (void)
++{
++ vec_xst (vf, 0, fp);
++}
++
++void foo30 (void)
++{
++ vec_xst (vsi, 0, sip);
++}
++
++void foo31 (void)
++{
++ vec_xst (vui, 0, uip);
++}
+Index: gcc/testsuite/gcc.target/powerpc/vsx-elemrev-2.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-2.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-2.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,234 @@
++/* { dg-do compile { target { powerpc64le*-*-* } } } */
++/* { dg-skip-if "do not override mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O0" } */
++/* { dg-final { scan-assembler-times "lxvd2x" 6 } } */
++/* { dg-final { scan-assembler-times "lxvw4x" 6 } } */
++/* { dg-final { scan-assembler-times "lxvh8x" 4 } } */
++/* { dg-final { scan-assembler-times "lxvb16x" 4 } } */
++/* { dg-final { scan-assembler-times "stxvd2x" 6 } } */
++/* { dg-final { scan-assembler-times "stxvw4x" 6 } } */
++/* { dg-final { scan-assembler-times "stxvh8x" 4 } } */
++/* { dg-final { scan-assembler-times "stxvb16x" 4 } } */
++
++#include <altivec.h>
++
++extern vector double vd, *vdp;
++extern vector signed long long vsll, *vsllp;
++extern vector unsigned long long vull, *vullp;
++extern vector float vf, *vfp;
++extern vector signed int vsi, *vsip;
++extern vector unsigned int vui, *vuip;
++extern vector signed short vss, *vssp;
++extern vector unsigned short vus, *vusp;
++extern vector signed char vsc, *vscp;
++extern vector unsigned char vuc, *vucp;
++extern double *dp;
++extern signed long long *sllp;
++extern unsigned long long *ullp;
++extern float *fp;
++extern signed int *sip;
++extern unsigned int *uip;
++extern signed short *ssp;
++extern unsigned short *usp;
++extern signed char *scp;
++extern unsigned char *ucp;
++
++void foo0 (void)
++{
++ vd = vec_xl (0, vdp);
++}
++
++void foo1 (void)
++{
++ vsll = vec_xl (0, vsllp);
++}
++
++void foo2 (void)
++{
++ vull = vec_xl (0, vullp);
++}
++
++void foo3 (void)
++{
++ vf = vec_xl (0, vfp);
++}
++
++void foo4 (void)
++{
++ vsi = vec_xl (0, vsip);
++}
++
++void foo5 (void)
++{
++ vui = vec_xl (0, vuip);
++}
++
++void foo6 (void)
++{
++ vss = vec_xl (0, vssp);
++}
++
++void foo7 (void)
++{
++ vus = vec_xl (0, vusp);
++}
++
++void foo8 (void)
++{
++ vsc = vec_xl (0, vscp);
++}
++
++void foo9 (void)
++{
++ vuc = vec_xl (0, vucp);
++}
++
++void foo10 (void)
++{
++ vec_xst (vd, 0, vdp);
++}
++
++void foo11 (void)
++{
++ vec_xst (vsll, 0, vsllp);
++}
++
++void foo12 (void)
++{
++ vec_xst (vull, 0, vullp);
++}
++
++void foo13 (void)
++{
++ vec_xst (vf, 0, vfp);
++}
++
++void foo14 (void)
++{
++ vec_xst (vsi, 0, vsip);
++}
++
++void foo15 (void)
++{
++ vec_xst (vui, 0, vuip);
++}
++
++void foo16 (void)
++{
++ vec_xst (vss, 0, vssp);
++}
++
++void foo17 (void)
++{
++ vec_xst (vus, 0, vusp);
++}
++
++void foo18 (void)
++{
++ vec_xst (vsc, 0, vscp);
++}
++
++void foo19 (void)
++{
++ vec_xst (vuc, 0, vucp);
++}
++
++void foo20 (void)
++{
++ vd = vec_xl (0, dp);
++}
++
++void foo21 (void)
++{
++ vsll = vec_xl (0, sllp);
++}
++
++void foo22 (void)
++{
++ vull = vec_xl (0, ullp);
++}
++
++void foo23 (void)
++{
++ vf = vec_xl (0, fp);
++}
++
++void foo24 (void)
++{
++ vsi = vec_xl (0, sip);
++}
++
++void foo25 (void)
++{
++ vui = vec_xl (0, uip);
++}
++
++void foo26 (void)
++{
++ vss = vec_xl (0, ssp);
++}
++
++void foo27 (void)
++{
++ vus = vec_xl (0, usp);
++}
++
++void foo28 (void)
++{
++ vsc = vec_xl (0, scp);
++}
++
++void foo29 (void)
++{
++ vuc = vec_xl (0, ucp);
++}
++
++void foo30 (void)
++{
++ vec_xst (vd, 0, dp);
++}
++
++void foo31 (void)
++{
++ vec_xst (vsll, 0, sllp);
++}
++
++void foo32 (void)
++{
++ vec_xst (vull, 0, ullp);
++}
++
++void foo33 (void)
++{
++ vec_xst (vf, 0, fp);
++}
++
++void foo34 (void)
++{
++ vec_xst (vsi, 0, sip);
++}
++
++void foo35 (void)
++{
++ vec_xst (vui, 0, uip);
++}
++
++void foo36 (void)
++{
++ vec_xst (vss, 0, ssp);
++}
++
++void foo37 (void)
++{
++ vec_xst (vus, 0, usp);
++}
++
++void foo38 (void)
++{
++ vec_xst (vsc, 0, scp);
++}
++
++void foo39 (void)
++{
++ vec_xst (vuc, 0, ucp);
++}
+Index: gcc/testsuite/gcc.target/powerpc/p9-vpermr.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/p9-vpermr.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/p9-vpermr.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,21 @@
++/* { dg-do compile { target { powerpc64le-*-* } } } */
++/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
++/* { dg-options "-mcpu=power9 -O2" } */
++
++/* Test generation of VPERMR/XXPERMR on ISA 3.0 in little endian. */
++
++#include <altivec.h>
++
++vector long long
++permute (vector long long *p, vector long long *q, vector unsigned char mask)
++{
++ vector long long a = *p;
++ vector long long b = *q;
++
++ /* Force a, b to be in altivec registers to select vpermr insn. */
++ __asm__ (" # a: %x0, b: %x1" : "+v" (a), "+v" (b));
++
++ return vec_perm (a, b, mask);
++}
++
++/* { dg-final { scan-assembler "vpermr\|xxpermr" } } */
+Index: gcc/testsuite/gcc.target/powerpc/darn-0.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/darn-0.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/darn-0.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-options "-mcpu=power9" } */
++
++/* This test should succeed on both 32- and 64-bit configurations. */
++#include <altivec.h>
++
++int get_random ()
++{
++ return __builtin_darn_32 ();
++}
++
++/* { dg-final { scan-assembler "darn" } } */
+Index: gcc/testsuite/gcc.target/powerpc/vsx-elemrev-3.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-3.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/vsx-elemrev-3.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,142 @@
++/* { dg-do compile { target { powerpc64-*-* } } } */
++/* { dg-skip-if "do not override mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
++/* { dg-options "-mcpu=power8 -O0" } */
++/* { dg-final { scan-assembler-times "lxvd2x" 16 } } */
++/* { dg-final { scan-assembler-times "lxvw4x" 8 } } */
++/* { dg-final { scan-assembler-times "stxvd2x" 16 } } */
++/* { dg-final { scan-assembler-times "stxvw4x" 8 } } */
++
++#include <altivec.h>
++
++extern vector double vd, *vdp;
++extern vector signed long long vsll, *vsllp;
++extern vector unsigned long long vull, *vullp;
++extern vector float vf, *vfp;
++extern vector signed int vsi, *vsip;
++extern vector unsigned int vui, *vuip;
++extern double *dp;
++extern signed long long *sllp;
++extern unsigned long long *ullp;
++extern float *fp;
++extern signed int *sip;
++extern unsigned int *uip;
++
++void foo0 (void)
++{
++ vd = vec_xl (0, vdp);
++}
++
++void foo1 (void)
++{
++ vsll = vec_xl (0, vsllp);
++}
++
++void foo2 (void)
++{
++ vull = vec_xl (0, vullp);
++}
++
++void foo3 (void)
++{
++ vf = vec_xl (0, vfp);
++}
++
++void foo4 (void)
++{
++ vsi = vec_xl (0, vsip);
++}
++
++void foo5 (void)
++{
++ vui = vec_xl (0, vuip);
++}
++
++void foo6 (void)
++{
++ vec_xst (vd, 0, vdp);
++}
++
++void foo7 (void)
++{
++ vec_xst (vsll, 0, vsllp);
++}
++
++void foo8 (void)
++{
++ vec_xst (vull, 0, vullp);
++}
++
++void foo9 (void)
++{
++ vec_xst (vf, 0, vfp);
++}
++
++void foo10 (void)
++{
++ vec_xst (vsi, 0, vsip);
++}
++
++void foo11 (void)
++{
++ vec_xst (vui, 0, vuip);
++}
++
++void foo20 (void)
++{
++ vd = vec_xl (0, dp);
++}
++
++void foo21 (void)
++{
++ vsll = vec_xl (0, sllp);
++}
++
++void foo22 (void)
++{
++ vull = vec_xl (0, ullp);
++}
++
++void foo23 (void)
++{
++ vf = vec_xl (0, fp);
++}
++
++void foo24 (void)
++{
++ vsi = vec_xl (0, sip);
++}
++
++void foo25 (void)
++{
++ vui = vec_xl (0, uip);
++}
++
++void foo26 (void)
++{
++ vec_xst (vd, 0, dp);
++}
++
++void foo27 (void)
++{
++ vec_xst (vsll, 0, sllp);
++}
++
++void foo28 (void)
++{
++ vec_xst (vull, 0, ullp);
++}
++
++void foo29 (void)
++{
++ vec_xst (vf, 0, fp);
++}
++
++void foo30 (void)
++{
++ vec_xst (vsi, 0, sip);
++}
++
++void foo31 (void)
++{
++ vec_xst (vui, 0, uip);
++}
+Index: gcc/testsuite/gcc.target/powerpc/darn-1.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/powerpc/darn-1.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/powerpc/darn-1.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { powerpc*-*-* } } } */
++/* { dg-options "-mcpu=power9" } */
++/* { dg-require-effective-target lp64 } */
++
++#include <altivec.h>
++
++long long get_conditioned_random ()
++{
++ return __builtin_darn ();
++}
++
++/* { dg-final { scan-assembler "darn" } } */
Index: gcc/testsuite/gcc.target/arm/interrupt-1.c
===================================================================
--- a/src/gcc/testsuite/gcc.target/arm/interrupt-1.c (.../tags/gcc_6_1_0_release)
@@ -3886,6 +5773,43 @@ Index: gcc/testsuite/gcc.target/arm/interrupt-2.c
/* { dg-final { scan-assembler "push\t{r0, r1, r2, r3, r4, r5, ip, lr}" } } */
-/* { dg-final { scan-assembler "pop\t{r0, r1, r2, r3, r4, r5, ip, pc}\\^" } } */
+/* { dg-final { scan-assembler "ldmfd\tsp!, {r0, r1, r2, r3, r4, r5, ip, pc}\\^" } } */
+Index: gcc/testsuite/gcc.target/arm/pr71056.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/arm/pr71056.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/arm/pr71056.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,32 @@
++/* PR target/71056. */
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_vfp3_ok } */
++/* { dg-options "-O3 -mfpu=vfpv3" } */
++
++/* Check that compiling for a non-NEON target doesn't try to introduce
++ a NEON vectorized builtin. */
++
++extern char *buff;
++int f2 ();
++struct T1
++{
++ int reserved[2];
++ unsigned int ip;
++ unsigned short cs;
++ unsigned short rsrv2;
++};
++void
++f3 (const char *p)
++{
++ struct T1 x;
++ __builtin_memcpy (&x, p, sizeof (struct T1));
++ x.reserved[0] = __builtin_bswap32 (x.reserved[0]);
++ x.reserved[1] = __builtin_bswap32 (x.reserved[1]);
++ x.ip = __builtin_bswap32 (x.ip);
++ x.cs = x.cs << 8 | x.cs >> 8;
++ x.rsrv2 = x.rsrv2 << 8 | x.rsrv2 >> 8;
++ if (f2 ())
++ {
++ __builtin_memcpy (buff, "\n", 1);
++ }
++}
Index: gcc/testsuite/gcc.target/arm/pr70830.c
===================================================================
--- a/src/gcc/testsuite/gcc.target/arm/pr70830.c (.../tags/gcc_6_1_0_release)
@@ -4089,6 +6013,19 @@ Index: gcc/testsuite/gcc.target/i386/pr70876.c
+
+ foo (x, x);
+}
+Index: gcc/testsuite/gcc.target/i386/iamcu/args.h
+===================================================================
+--- a/src/gcc/testsuite/gcc.target/i386/iamcu/args.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.target/i386/iamcu/args.h (.../branches/gcc-6-branch)
+@@ -30,7 +30,7 @@
+ /* Clear all scratch integer registers, excluding the one used to return
+ aggregate. */
+ #define clear_non_sret_int_hardware_registers \
+- asm __volatile__ ("xor %%edx, %%ebx\n\t" \
++ asm __volatile__ ("xor %%edx, %%edx\n\t" \
+ "xor %%ecx, %%ecx\n\t" \
+ ::: "edx", "ecx");
+
Index: gcc/testsuite/gcc.target/i386/fabsneg-1.c
===================================================================
--- a/src/gcc/testsuite/gcc.target/i386/fabsneg-1.c (.../tags/gcc_6_1_0_release)
@@ -4214,6 +6151,145 @@ Index: gcc/testsuite/gfortran.dg/gomp/pr70855.f90
+!$omp end workshare
+!$omp end parallel
+end program pr70855
+Index: gcc/testsuite/gfortran.dg/gomp/order-1.f90
+===================================================================
+--- a/src/gcc/testsuite/gfortran.dg/gomp/order-1.f90 (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gfortran.dg/gomp/order-1.f90 (.../branches/gcc-6-branch)
+@@ -0,0 +1,92 @@
++! { dg-do compile }
++
++module m
++ integer :: i
++end module m
++subroutine f1
++ type t
++ integer :: i
++ end type t
++ interface
++ integer function f3 (a, b)
++ !$omp declare simd (f3) uniform (a)
++ use m
++ import :: t
++ implicit none
++ type (t) :: a
++ integer :: b
++ end function f3
++ end interface
++ interface
++ integer function f4 (a, b)
++ use m
++ !$omp declare simd (f4) uniform (a)
++ import :: t
++ implicit none
++ type (t) :: a
++ integer :: b
++ end function f4
++ end interface
++ interface
++ integer function f5 (a, b)
++ use m
++ import :: t
++ !$omp declare simd (f5) uniform (a)
++ implicit none
++ type (t) :: a
++ integer :: b
++ end function f5
++ end interface
++ interface
++ integer function f6 (a, b)
++ use m
++ import :: t
++ implicit none
++ !$omp declare simd (f6) uniform (a)
++ type (t) :: a
++ integer :: b
++ end function f6
++ end interface
++ interface
++ integer function f7 (a, b)
++ use m
++ import :: t
++ implicit none
++ type (t) :: a
++ !$omp declare simd (f7) uniform (a)
++ integer :: b
++ end function f7
++ end interface
++ call f2
++contains
++ subroutine f2
++ !$omp threadprivate (t1)
++ use m
++ !$omp threadprivate (t2)
++ implicit none
++ !$omp threadprivate (t3)
++ integer, save :: t1, t2, t3, t4
++ !$omp threadprivate (t4)
++ t1 = 1; t2 = 2; t3 = 3; t4 = 4
++ end subroutine f2
++ subroutine f8
++ !$omp declare reduction (f8_1:real:omp_out = omp_out + omp_in)
++ use m
++ !$omp declare reduction (f8_2:real:omp_out = omp_out + omp_in)
++ implicit none
++ !$omp declare reduction (f8_3:real:omp_out = omp_out + omp_in)
++ integer :: j
++ !$omp declare reduction (f8_4:real:omp_out = omp_out + omp_in)
++ end subroutine f8
++ subroutine f9
++ !$omp declare target (f9_1)
++ use m
++ !$omp declare target (f9_2)
++ implicit none
++ !$omp declare target (f9_3)
++ !$omp declare target
++ integer, save :: f9_1, f9_2, f9_3, f9_4
++ !$omp declare target (f9_4)
++ f9_1 = 1; f9_2 = 2; f9_3 = 3; f9_4 = 4
++ end subroutine f9
++end subroutine f1
+Index: gcc/testsuite/gfortran.dg/gomp/order-2.f90
+===================================================================
+--- a/src/gcc/testsuite/gfortran.dg/gomp/order-2.f90 (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gfortran.dg/gomp/order-2.f90 (.../branches/gcc-6-branch)
+@@ -0,0 +1,37 @@
++! { dg-do compile }
++
++module m
++ integer :: i
++end module m
++subroutine f1
++ call f2
++contains
++ subroutine f2
++ use m
++ implicit none
++ integer, save :: t
++ t = 1
++ !$omp threadprivate (t1) ! { dg-error "Unexpected" }
++ end subroutine f2
++ subroutine f3
++ use m
++ implicit none
++ integer :: j
++ j = 1
++ !$omp declare reduction (foo:real:omp_out = omp_out + omp_in) ! { dg-error "Unexpected" }
++ end subroutine f3
++ subroutine f4
++ use m
++ implicit none
++ !$omp declare target
++ integer, save :: f4_1
++ f4_1 = 1
++ !$omp declare target (f4_1) ! { dg-error "Unexpected" }
++ !$omp declare target ! { dg-error "Unexpected" }
++ end subroutine f4
++ integer function f5 (a, b)
++ integer :: a, b
++ a = 1; b = 2
++ !$omp declare simd (f5) notinbranch ! { dg-error "Unexpected" }
++ end function f5
++end subroutine f1
Index: gcc/testsuite/gfortran.dg/goacc/reduction-2.f95
===================================================================
--- a/src/gcc/testsuite/gfortran.dg/goacc/reduction-2.f95 (.../tags/gcc_6_1_0_release)
@@ -4814,6 +6890,15 @@ Index: gcc/testsuite/gfortran.dg/submodule_14.f08
===================================================================
--- a/src/gcc/testsuite/gfortran.dg/submodule_14.f08 (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/testsuite/gfortran.dg/submodule_14.f08 (.../branches/gcc-6-branch)
+@@ -27,7 +27,7 @@
+ Call sub1 (x)
+ End If
+ End Procedure sub1
+- module function fcn1 (x) result(res)
++ recursive module function fcn1 (x) result(res)
+ integer, intent (inout) :: x
+ integer :: res
+ res = x - 1
@@ -46,4 +46,4 @@
x = 10
if (fcn1 (x) .ne. 0) call abort
@@ -5410,6 +7495,64 @@ Index: gcc/testsuite/gfortran.dg/dec_structure_7.f90
+ endif
+
+end program
+Index: gcc/testsuite/gfortran.dg/submodule_16.f08
+===================================================================
+--- a/src/gcc/testsuite/gfortran.dg/submodule_16.f08 (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gfortran.dg/submodule_16.f08 (.../branches/gcc-6-branch)
+@@ -0,0 +1,53 @@
++! { dg-do compile }
++!
++! Tests the fix for PR71156 in which the valid code (f7, f8 and f9 below)
++! triggered an error, while the invalid code (f1 to f6) compiled.
++!
++! Contributed by Damian Rouson <damian at sourceryinstitute.org>
++!
++module my_interface
++ implicit none
++ interface
++ module subroutine f1
++ end subroutine
++ module subroutine f2
++ end subroutine
++ module subroutine f3
++ end subroutine
++ elemental module subroutine f4
++ end subroutine
++ pure module subroutine f5
++ end subroutine
++ recursive module subroutine f6
++ end subroutine
++ elemental module subroutine f7
++ end subroutine
++ pure module subroutine f8
++ end subroutine
++ recursive module subroutine f9
++ end subroutine
++ end interface
++end module
++
++submodule(my_interface) my_implementation
++ implicit none
++contains
++ elemental module subroutine f1 ! { dg-error "Mismatch in ELEMENTAL attribute" }
++ end subroutine
++ pure module subroutine f2 ! { dg-error "Mismatch in PURE attribute" }
++ end subroutine
++ recursive module subroutine f3 ! { dg-error "Mismatch in RECURSIVE attribute" }
++ end subroutine
++ module subroutine f4 ! { dg-error "ELEMENTAL prefix" }
++ end subroutine
++ module subroutine f5 ! { dg-error "PURE prefix" }
++ end subroutine
++ module subroutine f6 ! { dg-error "RECURSIVE prefix" }
++ end subroutine
++ elemental module subroutine f7
++ end subroutine
++ pure module subroutine f8
++ end subroutine
++ recursive module subroutine f9
++ end subroutine
++end submodule
Index: gcc/testsuite/gcc.c-torture/compile/pr70916.c
===================================================================
--- a/src/gcc/testsuite/gcc.c-torture/compile/pr70916.c (.../tags/gcc_6_1_0_release)
@@ -5443,6 +7586,38 @@ Index: gcc/testsuite/gcc.c-torture/compile/pr70916.c
+ }
+ }
+}
+Index: gcc/testsuite/gnat.dg/opt56.adb
+===================================================================
+--- a/src/gcc/testsuite/gnat.dg/opt56.adb (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gnat.dg/opt56.adb (.../branches/gcc-6-branch)
+@@ -0,0 +1,15 @@
++-- { dg-do compile }
++-- { dg-options "-O3" }
++
++package body Opt56 is
++
++ function F (Values : Vector) return Boolean is
++ Result : Boolean := True;
++ begin
++ for I in Values'Range loop
++ Result := Result and Values (I) >= 0.0;
++ end loop;
++ return Result;
++ end;
++
++end Opt56;
+Index: gcc/testsuite/gnat.dg/opt56.ads
+===================================================================
+--- a/src/gcc/testsuite/gnat.dg/opt56.ads (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gnat.dg/opt56.ads (.../branches/gcc-6-branch)
+@@ -0,0 +1,7 @@
++package Opt56 is
++
++ type Vector is array (Positive range <>) of Float;
++
++ function F (Values : Vector) return Boolean;
++
++end Opt56;
Index: gcc/testsuite/gnat.dg/debug5.adb
===================================================================
--- a/src/gcc/testsuite/gnat.dg/debug5.adb (.../tags/gcc_6_1_0_release)
@@ -5527,6 +7702,75 @@ Index: gcc/testsuite/gcc.dg/graphite/pr70956.c
+/* { dg-options "-O2 -fgraphite-identity" } */
+
+#include "../tree-ssa/vrp66.c"
+Index: gcc/testsuite/gcc.dg/graphite/pr69067.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.dg/graphite/pr69067.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.dg/graphite/pr69067.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,28 @@
++/* { dg-do link } */
++/* { dg-options " -O1 -floop-nest-optimize" } */
++/* { dg-additional-options "-flto" { target lto } } */
++
++int a1, c1, cr, kt;
++int aa[2];
++
++int
++ce (void)
++{
++ while (a1 < 1)
++ {
++ int g8;
++ for (g8 = 0; g8 < 3; ++g8)
++ if (c1 != 0)
++ cr = aa[a1 * 2] = kt;
++ for (c1 = 0; c1 < 2; ++c1)
++ aa[c1] = cr;
++ ++a1;
++ }
++ return 0;
++}
++
++int
++main (void)
++{
++ return ce ();
++}
+Index: gcc/testsuite/gcc.dg/graphite/pr69068.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.dg/graphite/pr69068.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.dg/graphite/pr69068.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O1 -fgraphite-identity" } */
++
++int qo;
++int zh[2];
++
++void
++td (void)
++{
++ int ly, en;
++ for (ly = 0; ly < 2; ++ly)
++ for (en = 0; en < 2; ++en)
++ zh[en] = ((qo == 0) || (((qo * 2) != 0))) ? 1 : -1;
++}
+Index: gcc/testsuite/gcc.dg/pr71071.c
+===================================================================
+--- a/src/gcc/testsuite/gcc.dg/pr71071.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/gcc.dg/pr71071.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++/* PR bootstrap/71071 */
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++struct S { unsigned b : 1; } a;
++
++void
++foo ()
++{
++ if (a.b)
++ ;
++}
Index: gcc/testsuite/gcc.dg/torture/pr70935.c
===================================================================
--- a/src/gcc/testsuite/gcc.dg/torture/pr70935.c (.../tags/gcc_6_1_0_release)
@@ -5797,7 +8041,177 @@ Index: gcc/testsuite/ChangeLog
===================================================================
--- a/src/gcc/testsuite/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/testsuite/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,318 @@
+@@ -1,3 +1,488 @@
++2016-06-02 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71372
++ * c-c++-common/pr71372.c: New test.
++
++2016-06-02 Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ Back port from trunk
++ 2016-05-19 Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ * gcc.target/powerpc/darn-0.c: New test.
++ * gcc.target/powerpc/darn-1.c: New test.
++ * gcc.target/powerpc/darn-2.c: New test.
++
++2016-06-02 Paolo Carlini <paolo.carlini at oracle.com>
++
++ PR c++/70972
++ * g++.dg/cpp0x/inh-ctor20.C: New.
++ * g++.dg/cpp0x/inh-ctor21.C: Likewise.
++
++2016-06-02 Paolo Carlini <paolo.carlini at oracle.com>
++
++ * g++.dg/cpp1y/lambda-generic-static1.C: Use target c++14.
++ * g++.dg/cpp1y/lambda-generic-static2.C: Likewise.
++
++2016-06-01 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ Back port from trunk
++ 2016-05-23 Michael Meissner <meissner at linux.vnet.ibm.com>
++ Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ * gcc.target/powerpc/p9-permute.c: Run test on big endian as well
++ as little endian.
++
++ Back port from trunk
++ 2016-05-23 Michael Meissner <meissner at linux.vnet.ibm.com>
++ Kelvin Nilsen <kelvin at gcc.gnu.org>
++
++ * gcc.target/powerpc/p9-vpermr.c: New test for ISA 3.0 vpermr
++ support.
++
++ Back port from trunk
++ 2016-05-24 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * gcc.target/powerpc/p9-vparity.c: New file to check ISA 3.0
++ vector parity built-in functions.
++ * gcc.target/powerpc/ctz-3.c: New file to check ISA 3.0 vector
++ count trailing zeros automatic vectorization.
++ * gcc.target/powerpc/ctz-4.c: New file to check ISA 3.0 vector
++ count trailing zeros built-in functions.
++
++ Back port from trunk
++ 2016-05-24 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * gcc.target/powerpc/p9-vneg.c: New test for ISA 3.0 VNEGW/VNEGD
++ instructions.
++
++ Back port from trunk
++ 2016-05-11 Michael Meissner <meissner at linux.vnet.ibm.com>
++
++ * gcc.target/powerpc/dform-3.c: New test for ISA 3.0 vector d-form
++ support.
++ * gcc.target/powerpc/dform-1.c: Add -mlra option to silence
++ warning when using -mvsx-timode.
++ * gcc.target/powerpc/p8vector-int128-1.c: Likewise.
++ * gcc.target/powerpc/dform-2.c: Likewise.
++ * gcc.target/powerpc/pr68805.c: Likewise.
++
++2016-06-01 Paul Thomas <pault at gcc.gnu.org>
++
++ PR fortran/71156
++ * gfortran.dg/submodule_14.f08: Add missing recursive prefix
++ to the module procedure declaration.
++ * gfortran.dg/submodule_16.f08: New test.
++
++2016-06-01 Jakub Jelinek <jakub at redhat.com>
++
++ PR middle-end/71371
++ * c-c++-common/gomp/pr71371.c: New test.
++
++ * gfortran.dg/gomp/order-1.f90: New test.
++ * gfortran.dg/gomp/order-2.f90: New test.
++
++2016-06-01 Eric Botcazou <ebotcazou at adacore.com>
++
++ * gnat.dg/opt56.ad[sb]: New test.
++
++2016-05-31 Martin Sebor <msebor at redhat.com>
++
++ PR c++/71306
++ * g++.dg/warn/Wplacement-new-size-3.C: New test.
++
++2016-05-31 Richard Biener <rguenther at suse.de>
++
++ Backport from mainline
++ 2016-05-11 Richard Biener <rguenther at suse.de>
++
++ PR debug/71057
++ * g++.dg/debug/pr71057.C: New testcase.
++
++2016-05-31 Kyrylo Tkachov <kyrylo.tkachov at arm.com>
++
++ PR target/71056
++ * gcc.target/arm/pr71056.c: New test.
++
++2016-05-31 Tom de Vries <tom at codesourcery.com>
++
++ backport:
++ 2016-05-31 Tom de Vries <tom at codesourcery.com>
++
++ PR tree-optimization/69068
++ * gcc.dg/graphite/pr69068.c: New test.
++
++2016-05-30 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71349
++ * c-c++-common/gomp/clauses-1.c (bar): Add dd argument. Add
++ nowait depend(inout: dd[0]) clauses where permitted.
++
++2016-05-30 Bill Schmidt <wschmidt at linux.vnet.ibm.com>
++
++ Backport from mainline
++ 2016-04-29 Bill Schmidt <wschmidt at linux.vnet.ibm.com>
++
++ * gcc.target/powerpc/vsx-elemrev-1.c: New.
++ * gcc.target/powerpc/vsx-elemrev-2.c: New.
++ * gcc.target/powerpc/vsx-elemrev-3.c: New.
++ * gcc.target/powerpc/vsx-elemrev-4.c: New.
++
++2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ backport:
++ 2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ * gcc.dg/graphite/pr69067.c (main): Remove superfluous argument in call
++ to ce.
++
++2016-05-30 Uros Bizjak <ubizjak at gmail.com>
++
++ * gcc.target/i386/iamcu/args.h (clear_non_sret_int_hardware_registers):
++ Use correct register when clearing %edx.
++
++2016-05-30 Richard Biener <rguenther at suse.de>
++
++ Backport from mainline
++ 2016-05-11 Richard Biener <rguenther at suse.de>
++
++ PR middle-end/71002
++ * g++.dg/torture/pr71002.C: New testcase.
++
++ 2016-05-13 Jakub Jelinek <jakub at redhat.com>
++
++ PR bootstrap/71071
++ * gcc.dg/pr71071.c: New test.
++
++2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ backport:
++ 2016-05-30 Tom de Vries <tom at codesourcery.com>
++
++ PR tree-optimization/69067
++ * gcc.dg/graphite/pr69067.c: New test.
++
++2016-05-29 Paolo Carlini <paolo.carlini at oracle.com>
++
++ PR c++/71105
++ * g++.dg/cpp0x/lambda/lambda-conv11.C: New.
++ * g++.dg/cpp1y/lambda-conv1.C: Likewise.
++ * g++.dg/cpp1y/lambda-conv2.C: Likewise.
++
+2016-05-27 Ilya Enkovich <ilya.enkovich at intel.com>
+
+ Backport from mainline r236810.
@@ -6141,6 +8555,23 @@ Index: gcc/testsuite/opt55.adb
+ end;
+
+end Opt55;
+Index: gcc/testsuite/g++.dg/debug/pr71057.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/debug/pr71057.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/debug/pr71057.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++// { dg-do compile }
++// { dg-options "-g" }
++template <typename _Tp> using decay_t = _Tp;
++template <typename> struct A;
++template <typename> struct B { B(A<int>); };
++template <typename> struct C {
++ template <typename U> using constructor = B<decay_t<U>>;
++ typedef constructor<int> dummy;
++};
++template <typename> struct D {};
++C<int> a;
++D<B<int>> fn1() { fn1, a; }
Index: gcc/testsuite/g++.dg/opt/pr71210-2.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/opt/pr71210-2.C (.../tags/gcc_6_1_0_release)
@@ -6246,6 +8677,94 @@ Index: gcc/testsuite/g++.dg/ubsan/null-7.C
+ << A() << A() << A() << A() << A() << A() << A() << A() << A()
+ << A() << A() << A() << A() << A() << A() << A() << A() << A();
+}
+Index: gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv11.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv11.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv11.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,10 @@
++// PR c++/71105
++// { dg-do compile { target c++11 } }
++
++void foo()
++{
++ int i;
++ static_cast<void(*)()>([i]{}); // { dg-error "invalid static_cast" }
++ static_cast<void(*)()>([=]{}); // { dg-error "invalid static_cast" }
++ static_cast<void(*)()>([&]{}); // { dg-error "invalid static_cast" }
++}
+Index: gcc/testsuite/g++.dg/cpp0x/inh-ctor20.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp0x/inh-ctor20.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp0x/inh-ctor20.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,16 @@
++// PR c++/70972
++// { dg-do compile { target c++11 } }
++
++struct moveonly {
++ moveonly(moveonly&&) = default;
++ moveonly() = default;
++};
++
++struct A {
++ A(moveonly) {}
++};
++struct B : A {
++ using A::A;
++};
++
++B b(moveonly{});
+Index: gcc/testsuite/g++.dg/cpp0x/constexpr-array16.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array16.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array16.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,23 @@
++// PR c++/71166
++// { dg-do compile { target c++11 } }
++
++struct Foo { int value; };
++
++constexpr Foo MakeFoo() { return Foo{0}; }
++
++struct Bar {
++ Foo color = MakeFoo();
++};
++
++struct BarContainer {
++ Bar array[1];
++};
++
++Foo X ()
++{
++ return MakeFoo ();
++}
++
++void Foo() {
++ new BarContainer();
++}
+Index: gcc/testsuite/g++.dg/cpp0x/inh-ctor21.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp0x/inh-ctor21.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp0x/inh-ctor21.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,19 @@
++// PR c++/70972
++// { dg-do run { target c++11 } }
++
++struct abort_on_copy{
++ abort_on_copy(abort_on_copy&&) = default;
++ abort_on_copy(const abort_on_copy&) { __builtin_abort(); }
++ abort_on_copy() = default;
++};
++
++struct A {
++ A(abort_on_copy) {}
++};
++struct B : A {
++ using A::A;
++};
++
++int main() {
++ B b(abort_on_copy{});
++}
Index: gcc/testsuite/g++.dg/cpp0x/constexpr-recursion2.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/cpp0x/constexpr-recursion2.C (.../tags/gcc_6_1_0_release)
@@ -6270,6 +8789,171 @@ Index: gcc/testsuite/g++.dg/cpp0x/auto48.C
+{
+ auto f = [&] { return f; }; // { dg-error "before deduction" }
+}
+Index: gcc/testsuite/g++.dg/torture/pr71002.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/torture/pr71002.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/torture/pr71002.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,160 @@
++// { dg-do run }
++
++using size_t = __SIZE_TYPE__;
++
++inline void* operator new(size_t, void* p) noexcept
++{ return p; }
++
++inline void operator delete(void*, void*)
++{ }
++
++struct long_t
++{
++ size_t is_short : 1;
++ size_t length : (__SIZEOF_SIZE_T__ * __CHAR_BIT__ - 1);
++ size_t capacity;
++ char* pointer;
++};
++
++union long_raw_t {
++ unsigned char data[sizeof(long_t)];
++ struct __attribute__((aligned(alignof(long_t)))) { } align;
++};
++
++struct short_header
++{
++ unsigned char is_short : 1;
++ unsigned char length : (__CHAR_BIT__ - 1);
++};
++
++struct short_t
++{
++ short_header h;
++ char data[23];
++};
++
++union repr_t
++{
++ long_raw_t r;
++ short_t s;
++
++ const short_t& short_repr() const
++ { return s; }
++
++ const long_t& long_repr() const
++ { return *static_cast<const long_t*>(static_cast<const void*>(&r)); }
++
++ short_t& short_repr()
++ { return s; }
++
++ long_t& long_repr()
++ { return *static_cast<long_t*>(static_cast<void*>(&r)); }
++};
++
++class string
++{
++public:
++ string()
++ {
++ short_t& s = m_repr.short_repr();
++ s.h.is_short = 1;
++ s.h.length = 0;
++ s.data[0] = '\0';
++ }
++
++ string(const char* str)
++ {
++ size_t length = __builtin_strlen(str);
++ if (length + 1 > 23) {
++ long_t& l = m_repr.long_repr();
++ l.is_short = 0;
++ l.length = length;
++ l.capacity = length + 1;
++ l.pointer = new char[l.capacity];
++ __builtin_memcpy(l.pointer, str, length + 1);
++ } else {
++ short_t& s = m_repr.short_repr();
++ s.h.is_short = 1;
++ s.h.length = length;
++ __builtin_memcpy(s.data, str, length + 1);
++ }
++ }
++
++ string(string&& other)
++ : string{}
++ {
++ swap_data(other);
++ }
++
++ ~string()
++ {
++ if (!is_short()) {
++ delete[] m_repr.long_repr().pointer;
++ }
++ }
++
++ size_t length() const
++ { return is_short() ? short_length() : long_length(); }
++
++private:
++ bool is_short() const
++ { return m_repr.s.h.is_short != 0; }
++
++ size_t short_length() const
++ { return m_repr.short_repr().h.length; }
++
++ size_t long_length() const
++ { return m_repr.long_repr().length; }
++
++ void swap_data(string& other)
++ {
++ if (is_short()) {
++ if (other.is_short()) {
++ repr_t tmp(m_repr);
++ m_repr = other.m_repr;
++ other.m_repr = tmp;
++ } else {
++ short_t short_backup(m_repr.short_repr());
++ m_repr.short_repr().~short_t();
++ ::new(&m_repr.long_repr()) long_t(other.m_repr.long_repr());
++ other.m_repr.long_repr().~long_t();
++ ::new(&other.m_repr.short_repr()) short_t(short_backup);
++ }
++ } else {
++ if (other.is_short()) {
++ short_t short_backup(other.m_repr.short_repr());
++ other.m_repr.short_repr().~short_t();
++ ::new(&other.m_repr.long_repr()) long_t(m_repr.long_repr());
++ m_repr.long_repr().~long_t();
++ ::new(&m_repr.short_repr()) short_t(short_backup);
++ } else {
++ long_t tmp(m_repr.long_repr());
++ m_repr.long_repr() = other.m_repr.long_repr();
++ other.m_repr.long_repr() = tmp;
++ }
++ }
++ }
++
++ repr_t m_repr;
++};
++
++struct foo
++{
++ __attribute__((noinline))
++ foo(string str)
++ : m_str{static_cast<string&&>(str)},
++ m_len{m_str.length()}
++ { }
++
++ string m_str;
++ size_t m_len;
++};
++
++int main()
++{
++ foo f{"the quick brown fox jumps over the lazy dog"};
++ if (f.m_len == 0) {
++ __builtin_abort();
++ }
++ return 0;
++}
Index: gcc/testsuite/g++.dg/ipa/ipa-pta-2.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/ipa/ipa-pta-2.C (.../tags/gcc_6_1_0_release)
@@ -6312,6 +8996,172 @@ Index: gcc/testsuite/g++.dg/ipa/ipa-pta-2.C
+ abort ();
+ return 0;
+}
+Index: gcc/testsuite/g++.dg/cpp1y/lambda-conv1.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/lambda-conv1.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/lambda-conv1.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,13 @@
++// PR c++/71105
++// { dg-do compile { target c++14 } }
++
++void foo()
++{
++ int i;
++ static_cast<void(*)(int)>([i](auto){}); // { dg-error "invalid static_cast" }
++ static_cast<void(*)(int)>([=](auto){}); // { dg-error "invalid static_cast" }
++ static_cast<void(*)(int)>([&](auto){}); // { dg-error "invalid static_cast" }
++ static_cast<float(*)(float)>([i](auto x){ return x; }); // { dg-error "invalid static_cast" }
++ static_cast<float(*)(float)>([=](auto x){ return x; }); // { dg-error "invalid static_cast" }
++ static_cast<float(*)(float)>([&](auto x){ return x; }); // { dg-error "invalid static_cast" }
++}
+Index: gcc/testsuite/g++.dg/cpp1y/var-templ52.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/var-templ52.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/var-templ52.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,14 @@
++// PR c++/69515
++// { dg-do link { target c++14 } }
++
++struct A { A(int = 0) {} };
++
++template<class...> class meow;
++
++template<typename T> A foo;
++template<typename... Ts> A foo<meow<Ts...>> = 1;
++
++auto&& a = foo<meow<int>>;
++auto&& b = foo<meow<int, int>>;
++
++int main() {}
+Index: gcc/testsuite/g++.dg/cpp1y/lambda-conv2.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/lambda-conv2.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/lambda-conv2.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,23 @@
++// PR c++/71105
++// { dg-do compile { target c++14 } }
++
++template <typename T> T declval();
++template <typename, typename> struct is_same
++{ static constexpr bool value = false; };
++template <typename T> struct is_same<T, T>
++{ static constexpr bool value = true; };
++
++template <class F>
++struct indirected : F {
++ indirected(F f) : F(f) {}
++ template <class I>
++ auto operator()(I i) -> decltype(declval<F&>()(*i)) {
++ return static_cast<F&>(*this)(*i);
++ }
++};
++
++int main() {
++ auto f = [=](auto i) { return i + i; };
++ auto i = indirected<decltype(f)>{f};
++ static_assert(is_same<decltype(i(declval<int*>())), int>::value, "");
++}
+Index: gcc/testsuite/g++.dg/cpp1y/var-templ39a.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/var-templ39a.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/var-templ39a.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,27 @@
++// PR c++/66260
++// { dg-do compile { target c++14 } }
++
++template <class>
++bool foo = false;
++template <>
++bool foo<int> = true;
++template <class T, int N>
++bool foo<T[N]> = foo<T>;
++
++#define assert(X) if (!(X)) __builtin_abort();
++
++int main()
++{
++ // { dg-final { scan-assembler "_Z3fooIiE" } }
++ assert(foo<int>);
++ // { dg-final { scan-assembler "_Z3fooIdE" } }
++ assert(!foo<double>);
++ // { dg-final { scan-assembler "_Z3fooIA3_iE" } }
++ assert(foo<int[3]>);
++ // { dg-final { scan-assembler "_Z3fooIA3_dE" } }
++ assert(!foo<double[3]>);
++ // { dg-final { scan-assembler "_Z3fooIA2_A5_A3_iE" } }
++ assert(foo<int[2][5][3]>);
++ // { dg-final { scan-assembler "_Z3fooIA2_A5_A3_dE" } }
++ assert(!foo<double[2][5][3]>);
++}
+Index: gcc/testsuite/g++.dg/cpp1y/var-templ39.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/var-templ39.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/var-templ39.C (.../branches/gcc-6-branch)
+@@ -1,5 +1,5 @@
+ // PR c++/66260
+-// { dg-do compile { target c++14 } }
++// { dg-do assemble { target c++14 } }
+
+ template <class>
+ constexpr bool foo = false;
+Index: gcc/testsuite/g++.dg/cpp1y/lambda-generic-static1.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/lambda-generic-static1.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/lambda-generic-static1.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,13 @@
++// PR c++/70735
++// { dg-do run { target c++14 } }
++
++int main()
++{
++ static int a;
++ auto f = [](auto) { return a; };
++ if (f(0) != 0)
++ __builtin_abort();
++ a = 1;
++ if (f(0) != 1)
++ __builtin_abort();
++}
+Index: gcc/testsuite/g++.dg/cpp1y/lambda-generic-static2.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/lambda-generic-static2.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/lambda-generic-static2.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,19 @@
++// PR c++/70735
++// { dg-do run { target c++14 } }
++
++template <class T>
++static void g()
++{
++ static int a;
++ auto f = [](auto) { return a; };
++ if (f(0) != 0)
++ __builtin_abort();
++ a = 1;
++ if (f(0) != 1)
++ __builtin_abort();
++}
++
++int main()
++{
++ g<int>();
++}
+Index: gcc/testsuite/g++.dg/cpp1y/var-templ51.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp1y/var-templ51.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/cpp1y/var-templ51.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,11 @@
++// PR c++/60095
++// { dg-do link { target c++14 } }
++
++template <class>
++constexpr bool b = false;
++template<typename T>
++constexpr bool b<T*> = true;
++int main() {
++ b<int*>;
++ b<double*>;
++}
Index: gcc/testsuite/g++.dg/ext/flexary16.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/ext/flexary16.C (.../tags/gcc_6_1_0_release)
@@ -6402,6 +9252,31 @@ Index: gcc/testsuite/g++.dg/vect/simd-clone-6.cc
+ check_vect ();
+ return 0;
+}
+Index: gcc/testsuite/g++.dg/lookup/scoped9.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/lookup/scoped9.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/lookup/scoped9.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,10 @@
++// PR c++/71173
++
++namespace foo {
++ namespace bar {
++ class foo {};
++ }
++ class baz {};
++}
++using namespace foo::bar;
++::foo::baz mybaz;
+Index: gcc/testsuite/g++.dg/lookup/scoped10.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/lookup/scoped10.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/lookup/scoped10.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,5 @@
++namespace A { }
++namespace N { struct A; }
++using namespace N;
++
++struct ::A *p;
Index: gcc/testsuite/g++.dg/gomp/declare-simd-6.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/gomp/declare-simd-6.C (.../tags/gcc_6_1_0_release)
@@ -6444,6 +9319,68 @@ Index: gcc/testsuite/g++.dg/gomp/declare-simd-6.C
+int f15 (S a);
+#pragma omp declare simd linear(a:1) // { dg-error "applied to non-integral non-pointer variable" }
+int f16 (S a);
+Index: gcc/testsuite/g++.dg/other/i386-10.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/other/i386-10.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/other/i386-10.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,12 @@
++// { dg-do compile { target i?86-*-* x86_64-*-* } }
++// { dg-options -maes }
++
++typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
++
++int main()
++{
++ const char index = 1;
++ __m128i r = { };
++
++ r = __builtin_ia32_aeskeygenassist128 (r, (int)(index));
++}
+Index: gcc/testsuite/g++.dg/warn/Wplacement-new-size-3.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/warn/Wplacement-new-size-3.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/warn/Wplacement-new-size-3.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,40 @@
++// PR c++/71306 - bogus -Wplacement-new with an array element
++// { dg-do compile }
++// { dg-options "-Wplacement-new" }
++
++void* operator new (__SIZE_TYPE__, void *p) { return p; }
++
++struct S64 { char c [64]; };
++
++S64 s2 [2];
++S64* ps2 [2];
++S64* ps2_2 [2][2];
++
++void* pv2 [2];
++
++void f ()
++{
++ char a [2][sizeof (S64)];
++
++ new (a) S64;
++ new (a [0]) S64;
++ new (a [1]) S64;
++
++ // Verify there is no warning with buffers of sufficient size.
++ new (&s2 [0]) S64;
++ new (&s2 [1]) S64;
++
++ // ..and no warning with pointers to buffers of unknown size.
++ new (ps2 [0]) S64;
++ new (ps2 [1]) S64;
++
++ // But a warning when using the ps2_2 array itself as opposed
++ // to the pointers it's elements might point to.
++ new (ps2_2 [0]) S64; // { dg-warning "placement new" }
++ new (ps2_2 [1]) S64; // { dg-warning "placement new" }
++
++ // ..and no warning again with pointers to buffers of unknown
++ // size.
++ new (pv2 [0]) S64;
++ new (pv2 [1]) S64;
++}
Index: gcc/testsuite/g++.dg/warn/Wno-narrowing1.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/warn/Wno-narrowing1.C (.../tags/gcc_6_1_0_release)
@@ -6486,6 +9423,27 @@ Index: gcc/testsuite/g++.dg/template/pr70466-2.C
+ foo (&B::bar);
+ return 0;
+}
+Index: gcc/testsuite/g++.dg/template/friend62.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/template/friend62.C (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/g++.dg/template/friend62.C (.../branches/gcc-6-branch)
+@@ -0,0 +1,16 @@
++// PR c++/71227
++// { dg-options "" }
++
++class A {
++ public:
++ template<typename T>
++ friend int f(int x, T v) { // { dg-message "declaration" }
++ return x + v;
++ }
++};
++
++
++template<>
++int f(int x, int v) { // { dg-warning "friend" }
++ return x + v;
++}
Index: gcc/testsuite/g++.dg/template/pr70466-1.C
===================================================================
--- a/src/gcc/testsuite/g++.dg/template/pr70466-1.C (.../tags/gcc_6_1_0_release)
@@ -6518,6 +9476,25 @@ Index: gcc/testsuite/g++.dg/template/pr70466-1.C
+ foo (&B::bar);
+ return 0;
+}
+Index: gcc/testsuite/c-c++-common/pr71372.c
+===================================================================
+--- a/src/gcc/testsuite/c-c++-common/pr71372.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/c-c++-common/pr71372.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,14 @@
++/* PR c++/71372 */
++/* { dg-do compile } */
++/* { dg-options "-O2 -fdump-tree-optimized" } */
++
++void
++foo (volatile int *p, int q)
++{
++ *(volatile int *)p = 0;
++ *(p + (q - q) + 1) = 0;
++ *(p + (q - q) + 2) = 0;
++ *(p + (q - q) + 3) = 0;
++}
++
++/* { dg-final { scan-tree-dump-times " ={v} " 4 "optimized" } } */
Index: gcc/testsuite/c-c++-common/asan/pr70712.c
===================================================================
--- a/src/gcc/testsuite/c-c++-common/asan/pr70712.c (.../tags/gcc_6_1_0_release)
@@ -6572,6 +9549,129 @@ Index: gcc/testsuite/c-c++-common/pr69669.c
}
+
+/* { dg-final { scan-rtl-dump-times "mem:QI" 1 "final" } } */
+Index: gcc/testsuite/c-c++-common/gomp/pr71371.c
+===================================================================
+--- a/src/gcc/testsuite/c-c++-common/gomp/pr71371.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/c-c++-common/gomp/pr71371.c (.../branches/gcc-6-branch)
+@@ -0,0 +1,25 @@
++/* PR middle-end/71371 */
++/* { dg-do compile } */
++
++void baz (int *);
++
++void
++foo (void)
++{
++ int i;
++ #pragma omp taskloop
++ for (i = 0; i < 100; i++)
++ baz (&i);
++}
++
++void
++bar (void)
++{
++ int i;
++ #pragma omp parallel
++ {
++ #pragma omp for
++ for (i = 0; i < 100; i++)
++ baz (&i);
++ }
++}
+Index: gcc/testsuite/c-c++-common/gomp/clauses-1.c
+===================================================================
+--- a/src/gcc/testsuite/c-c++-common/gomp/clauses-1.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/testsuite/c-c++-common/gomp/clauses-1.c (.../branches/gcc-6-branch)
+@@ -34,7 +34,7 @@
+
+ void
+ bar (int d, int m, int i1, int i2, int p, int *idp, int s,
+- int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q)
++ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int *dd)
+ {
+ #pragma omp for simd \
+ private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \
+@@ -63,12 +63,13 @@
+ }
+ #pragma omp target parallel \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+- if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
++ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
++ nowait depend(inout: dd[0])
+ ;
+ #pragma omp target parallel for \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+- lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)
++ lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp target parallel for simd \
+@@ -75,17 +76,17 @@
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
+- safelen(8) simdlen(4) aligned(q: 32)
++ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp target teams \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+- shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
++ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
+ ;
+ #pragma omp target teams distribute \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+- collapse(1) dist_schedule(static, 16)
++ collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ;
+ #pragma omp target teams distribute parallel for \
+@@ -93,7 +94,7 @@
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+ collapse(1) dist_schedule(static, 16) \
+ if (parallel: i2) num_threads (nth) proc_bind(spread) \
+- lastprivate (l) schedule(static, 4)
++ lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp target teams distribute parallel for simd \
+@@ -102,7 +103,7 @@
+ collapse(1) dist_schedule(static, 16) \
+ if (parallel: i2) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) schedule(static, 4) \
+- safelen(8) simdlen(4) aligned(q: 32)
++ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp target teams distribute simd \
+@@ -109,12 +110,13 @@
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+ collapse(1) dist_schedule(static, 16) \
+- safelen(8) simdlen(4) aligned(q: 32)
++ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp target simd \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+- safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r)
++ safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) \
++ nowait depend(inout: dd[0])
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp taskloop simd \
+@@ -128,7 +130,7 @@
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r)
+ for (int i = 0; i < 64; i++)
+ ll++;
+- #pragma omp target
++ #pragma omp target nowait depend(inout: dd[0])
+ #pragma omp teams distribute \
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+ collapse(1) dist_schedule(static, 16)
Index: gcc/testsuite/c-c++-common/goacc/combined-reduction.c
===================================================================
--- a/src/gcc/testsuite/c-c++-common/goacc/combined-reduction.c (.../tags/gcc_6_1_0_release)
@@ -6619,6 +9719,31 @@ Index: gcc/cp/typeck.c
{
tree t = complete_type (TREE_TYPE (type));
unsigned int needs_constructing, has_nontrivial_dtor;
+Index: gcc/cp/init.c
+===================================================================
+--- a/src/gcc/cp/init.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/init.c (.../branches/gcc-6-branch)
+@@ -2375,7 +2375,8 @@
+
+ STRIP_NOPS (oper);
+
+- if (TREE_CODE (oper) == ARRAY_REF)
++ if (TREE_CODE (oper) == ARRAY_REF
++ && (addr_expr || TREE_CODE (TREE_TYPE (oper)) == ARRAY_TYPE))
+ {
+ /* Similar to the offset computed above, see if the array index
+ is a compile-time constant. If so, and unless the offset was
+@@ -2404,8 +2405,8 @@
+ bool compref = TREE_CODE (oper) == COMPONENT_REF;
+
+ /* Descend into a struct or union to find the member whose address
+- is being used as the agument. */
+- while (TREE_CODE (oper) == COMPONENT_REF)
++ is being used as the argument. */
++ if (TREE_CODE (oper) == COMPONENT_REF)
+ {
+ tree op0 = oper;
+ while (TREE_CODE (op0 = TREE_OPERAND (op0, 0)) == COMPONENT_REF);
Index: gcc/cp/decl.c
===================================================================
--- a/src/gcc/cp/decl.c (.../tags/gcc_6_1_0_release)
@@ -6665,6 +9790,19 @@ Index: gcc/cp/decl.c
for (itk = (use_short_enum ? itk_char : itk_int);
itk != itk_none;
itk++)
+Index: gcc/cp/method.c
+===================================================================
+--- a/src/gcc/cp/method.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/method.c (.../branches/gcc-6-branch)
+@@ -484,6 +484,8 @@
+ tree type = TREE_TYPE (parm);
+ if (DECL_PACK_P (parm))
+ type = PACK_EXPANSION_PATTERN (type);
++ if (TREE_CODE (type) != REFERENCE_TYPE)
++ type = cp_build_reference_type (type, /*rval=*/true);
+ exp = build_static_cast (type, exp, tf_warning_or_error);
+ if (DECL_PACK_P (parm))
+ exp = make_pack_expansion (exp);
Index: gcc/cp/constexpr.c
===================================================================
--- a/src/gcc/cp/constexpr.c (.../tags/gcc_6_1_0_release)
@@ -6693,11 +9831,86 @@ Index: gcc/cp/ChangeLog
===================================================================
--- a/src/gcc/cp/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/cp/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,66 @@
+@@ -1,3 +1,141 @@
++2016-06-02 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71372
++ * cp-gimplify.c (cp_fold): For INDIRECT_REF, if the folded expression
++ is INDIRECT_REF or MEM_REF, copy over TREE_READONLY, TREE_SIDE_EFFECTS
++ and TREE_THIS_VOLATILE flags. For ARRAY_REF and ARRAY_RANGE_REF, copy
++ over TREE_READONLY, TREE_SIDE_EFFECTS and TREE_THIS_VOLATILE flags
++ to the newly built tree.
++
++2016-06-02 Paolo Carlini <paolo.carlini at oracle.com>
++
++ PR c++/70972
++ * method.c (forward_parm): Use cp_build_reference_type.
++
++2016-05-31 Jason Merrill <jason at redhat.com>
++
++ PR c++/71166
++ * decl2.c (c_parse_final_cleanups): Don't call fini_constexpr.
++
++ PR c++/71227
++ * pt.c (check_explicit_specialization): Give better diagnostic about
++ specializing a hidden friend.
++
++ PR c++/60095
++ PR c++/69515
++ PR c++/69009
++ * pt.c (instantiate_template_1): Don't put the partial
++ specialization in DECL_TI_TEMPLATE.
++ (partial_specialization_p, impartial_args): Remove.
++ (regenerate_decl_from_template): Add args parm.
++ (instantiate_decl): Look up the partial specialization again.
++
++ PR c++/71173
++ PR c++/70522
++ * cp-tree.h (enum tag_types): Add scope_type.
++ * parser.c (cp_parser_class_name): Use scope_type.
++ (prefer_type_arg): Handle scope_type.
++ (cp_parser_lookup_name): Use prefer_type_arg.
++ * name-lookup.c (lookup_qualified_name): Change bool is_type_p to
++ int prefer_type, use lookup_flags.
++ * name-lookup.h: Adjust.
++
++ PR c++/70584
++ * cp-gimplify.c (cp_fold_maybe_rvalue): Loop in case cp_fold
++ returns a decl.
++ (cp_fold) [INDIRECT_REF]: Call maybe_undo_parenthesized_ref.
++
++ PR c++/70735
++ * pt.c (tsubst_copy): Just return a local variable from
++ non-template context. Don't call rest_of_decl_compilation for
++ duplicated static locals.
++ (tsubst_decl): Set DECL_CONTEXT of local static from another
++ function.
++
++2016-05-31 Martin Sebor <msebor at redhat.com>
++
++ PR c++/71306
++ * init.c (warn_placement_new_too_small): Handle placement new arguments
++ that are elements of arrays more carefully. Remove a pointless loop.
++
++2016-05-30 Jakub Jelinek <jakub at redhat.com>
++
++ PR c++/71349
++ * parser.c (cp_parser_omp_for): Don't disallow nowait clause
++ when combined with target construct.
++ (cp_parser_omp_parallel): Pass cclauses == NULL as last argument
++ to cp_parser_omp_all_clauses.
++
++2016-05-29 Paolo Carlini <paolo.carlini at oracle.com>
++
++ PR c++/71105
++ * lambda.c (maybe_add_lambda_conv_op): Early return also when
++ LAMBDA_EXPR_DEFAULT_CAPTURE_MODE != CPLD_NONE.
++
+2016-05-24 Martin Sebor <msebor at redhat.com>
+
+ PR c++/71147
-+ * decl.c (layout_var_decl, grokdeclarator): Use complete_or_array_type_p.
++ * decl.c (layout_var_decl, grokdeclarator): Use
++ complete_or_array_type_p.
+ * pt.c (instantiate_class_template_1): Try to complete the element
+ type of a flexible array member.
+ (can_complete_type_without_circularity): Handle arrays of unknown bound.
@@ -6760,6 +9973,94 @@ Index: gcc/cp/ChangeLog
2016-04-27 Release Manager
* GCC 6.1.0 released.
+Index: gcc/cp/cp-gimplify.c
+===================================================================
+--- a/src/gcc/cp/cp-gimplify.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/cp-gimplify.c (.../branches/gcc-6-branch)
+@@ -1877,13 +1877,21 @@
+ static tree
+ cp_fold_maybe_rvalue (tree x, bool rval)
+ {
+- if (rval && DECL_P (x))
++ while (true)
+ {
+- tree v = decl_constant_value (x);
+- if (v != error_mark_node)
+- x = v;
++ x = cp_fold (x);
++ if (rval && DECL_P (x))
++ {
++ tree v = decl_constant_value (x);
++ if (v != x && v != error_mark_node)
++ {
++ x = v;
++ continue;
++ }
++ }
++ break;
+ }
+- return cp_fold (x);
++ return x;
+ }
+
+ /* Fold expression X which is used as an rvalue. */
+@@ -1995,6 +2003,15 @@
+
+ break;
+
++ case INDIRECT_REF:
++ /* We don't need the decltype(auto) obfuscation anymore. */
++ if (REF_PARENTHESIZED_P (x))
++ {
++ tree p = maybe_undo_parenthesized_ref (x);
++ return cp_fold (p);
++ }
++ goto unary;
++
+ case ADDR_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+@@ -2007,7 +2024,7 @@
+ case BIT_NOT_EXPR:
+ case TRUTH_NOT_EXPR:
+ case FIXED_CONVERT_EXPR:
+- case INDIRECT_REF:
++ unary:
+
+ loc = EXPR_LOCATION (x);
+ op0 = cp_fold_maybe_rvalue (TREE_OPERAND (x, 0), rval_ops);
+@@ -2017,7 +2034,16 @@
+ if (op0 == error_mark_node)
+ x = error_mark_node;
+ else
+- x = fold_build1_loc (loc, code, TREE_TYPE (x), op0);
++ {
++ x = fold_build1_loc (loc, code, TREE_TYPE (x), op0);
++ if (code == INDIRECT_REF
++ && (INDIRECT_REF_P (x) || TREE_CODE (x) == MEM_REF))
++ {
++ TREE_READONLY (x) = TREE_READONLY (org_x);
++ TREE_SIDE_EFFECTS (x) = TREE_SIDE_EFFECTS (org_x);
++ TREE_THIS_VOLATILE (x) = TREE_THIS_VOLATILE (org_x);
++ }
++ }
+ }
+ else
+ x = fold (x);
+@@ -2294,7 +2320,12 @@
+ || op3 == error_mark_node)
+ x = error_mark_node;
+ else
+- x = build4_loc (loc, code, TREE_TYPE (x), op0, op1, op2, op3);
++ {
++ x = build4_loc (loc, code, TREE_TYPE (x), op0, op1, op2, op3);
++ TREE_READONLY (x) = TREE_READONLY (org_x);
++ TREE_SIDE_EFFECTS (x) = TREE_SIDE_EFFECTS (org_x);
++ TREE_THIS_VOLATILE (x) = TREE_THIS_VOLATILE (org_x);
++ }
+ }
+
+ x = fold (x);
Index: gcc/cp/typeck2.c
===================================================================
--- a/src/gcc/cp/typeck2.c (.../tags/gcc_6_1_0_release)
@@ -6785,7 +10086,45 @@ Index: gcc/cp/pt.c
===================================================================
--- a/src/gcc/cp/pt.c (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/cp/pt.c (.../branches/gcc-6-branch)
-@@ -9554,7 +9554,7 @@
+@@ -182,7 +182,6 @@
+ static tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree);
+ static tree tsubst_template_args (tree, tree, tsubst_flags_t, tree);
+ static tree tsubst_template_parms (tree, tree, tsubst_flags_t);
+-static void regenerate_decl_from_template (tree, tree);
+ static tree most_specialized_partial_spec (tree, tsubst_flags_t);
+ static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int);
+ static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree);
+@@ -2808,6 +2807,13 @@
+ context. */
+ fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
+ false, true);
++ if (fns == error_mark_node)
++ /* If lookup fails, look for a friend declaration so we can
++ give a better diagnostic. */
++ fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
++ /*type*/false, /*complain*/true,
++ /*hidden*/true);
++
+ if (fns == error_mark_node || !is_overloaded_fn (fns))
+ {
+ error ("%qD is not a template function", dname);
+@@ -2953,6 +2959,15 @@
+ CP_DECL_CONTEXT (tmpl)))
+ error ("%qD is not declared in %qD",
+ tmpl, current_namespace);
++ else if (TREE_CODE (decl) == FUNCTION_DECL
++ && DECL_HIDDEN_FRIEND_P (tmpl))
++ {
++ if (pedwarn (DECL_SOURCE_LOCATION (decl), 0,
++ "friend declaration %qD is not visible to "
++ "explicit specialization", tmpl))
++ inform (DECL_SOURCE_LOCATION (tmpl),
++ "friend declaration here");
++ }
+
+ tree gen_tmpl = most_general_template (tmpl);
+
+@@ -9554,7 +9569,7 @@
return 0;
else if (COMPLETE_TYPE_P (type))
return 1;
@@ -6794,7 +10133,7 @@ Index: gcc/cp/pt.c
return can_complete_type_without_circularity (TREE_TYPE (type));
else if (CLASS_TYPE_P (type)
&& TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type)))
-@@ -10118,17 +10118,12 @@
+@@ -10118,17 +10133,12 @@
if (can_complete_type_without_circularity (rtype))
complete_type (rtype);
@@ -6817,6 +10156,181 @@ Index: gcc/cp/pt.c
cxx_incomplete_type_error (r, rtype);
TREE_TYPE (r) = error_mark_node;
}
+@@ -12285,6 +12295,14 @@
+ local_p = true;
+ /* Subsequent calls to pushdecl will fill this in. */
+ ctx = NULL_TREE;
++ /* Unless this is a reference to a static variable from an
++ enclosing function, in which case we need to fill it in now. */
++ if (TREE_STATIC (t))
++ {
++ tree fn = tsubst (DECL_CONTEXT (t), args, complain, in_decl);
++ if (fn != current_function_decl)
++ ctx = fn;
++ }
+ spec = retrieve_local_specialization (t);
+ }
+ /* If we already have the specialization we need, there is
+@@ -13996,7 +14014,8 @@
+ case FUNCTION_DECL:
+ if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
+ r = tsubst (t, args, complain, in_decl);
+- else if (local_variable_p (t))
++ else if (local_variable_p (t)
++ && uses_template_parms (DECL_CONTEXT (t)))
+ {
+ r = retrieve_local_specialization (t);
+ if (r == NULL_TREE)
+@@ -14040,14 +14059,9 @@
+ gcc_assert (cp_unevaluated_operand || TREE_STATIC (r)
+ || decl_constant_var_p (r)
+ || errorcount || sorrycount);
+- if (!processing_template_decl)
+- {
+- if (TREE_STATIC (r))
+- rest_of_decl_compilation (r, toplevel_bindings_p (),
+- at_eof);
+- else
+- r = process_outer_var_ref (r, complain);
+- }
++ if (!processing_template_decl
++ && !TREE_STATIC (r))
++ r = process_outer_var_ref (r, complain);
+ }
+ /* Remember this for subsequent uses. */
+ if (local_specializations)
+@@ -17376,6 +17390,7 @@
+
+ tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl);
+
++ fndecl = NULL_TREE;
+ if (VAR_P (pattern))
+ {
+ /* We need to determine if we're using a partial or explicit
+@@ -17387,14 +17402,16 @@
+ pattern = error_mark_node;
+ else if (elt)
+ {
+- tmpl = TREE_VALUE (elt);
+- pattern = DECL_TEMPLATE_RESULT (tmpl);
+- targ_ptr = TREE_PURPOSE (elt);
++ tree partial_tmpl = TREE_VALUE (elt);
++ tree partial_args = TREE_PURPOSE (elt);
++ tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl);
++ fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl);
+ }
+ }
+
+ /* Substitute template parameters to obtain the specialization. */
+- fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
++ if (fndecl == NULL_TREE)
++ fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
+ if (DECL_CLASS_SCOPE_P (gen_tmpl))
+ pop_nested_class ();
+ pop_from_top_level ();
+@@ -20848,36 +20865,6 @@
+ return decl;
+ }
+
+-/* True iff the TEMPLATE_DECL tmpl is a partial specialization. */
+-
+-static bool
+-partial_specialization_p (tree tmpl)
+-{
+- /* Any specialization has DECL_TEMPLATE_SPECIALIZATION. */
+- if (!DECL_TEMPLATE_SPECIALIZATION (tmpl))
+- return false;
+- tree t = DECL_TI_TEMPLATE (tmpl);
+- /* A specialization that fully specializes one of the containing classes is
+- not a partial specialization. */
+- return (list_length (DECL_TEMPLATE_PARMS (tmpl))
+- == list_length (DECL_TEMPLATE_PARMS (t)));
+-}
+-
+-/* If TMPL is a partial specialization, return the arguments for its primary
+- template. */
+-
+-static tree
+-impartial_args (tree tmpl, tree args)
+-{
+- if (!partial_specialization_p (tmpl))
+- return args;
+-
+- /* If TMPL is a partial specialization, we need to substitute to get
+- the args for the primary template. */
+- return tsubst_template_args (DECL_TI_ARGS (tmpl), args,
+- tf_warning_or_error, tmpl);
+-}
+-
+ /* Return the most specialized of the template partial specializations
+ which can produce TARGET, a specialization of some class or variable
+ template. The value returned is actually a TREE_LIST; the TREE_VALUE is
+@@ -21379,14 +21366,12 @@
+ to instantiate the DECL, we regenerate it. */
+
+ static void
+-regenerate_decl_from_template (tree decl, tree tmpl)
++regenerate_decl_from_template (tree decl, tree tmpl, tree args)
+ {
+ /* The arguments used to instantiate DECL, from the most general
+ template. */
+- tree args;
+ tree code_pattern;
+
+- args = DECL_TI_ARGS (decl);
+ code_pattern = DECL_TEMPLATE_RESULT (tmpl);
+
+ /* Make sure that we can see identifiers, and compute access
+@@ -21702,7 +21687,7 @@
+ return d;
+
+ gen_tmpl = most_general_template (tmpl);
+- gen_args = impartial_args (tmpl, DECL_TI_ARGS (d));
++ gen_args = DECL_TI_ARGS (d);
+
+ if (tmpl != gen_tmpl)
+ /* We should already have the extra args. */
+@@ -21721,6 +21706,20 @@
+ /* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern
+ for the instantiation. */
+ td = template_for_substitution (d);
++ args = gen_args;
++
++ if (VAR_P (d))
++ {
++ /* Look up an explicit specialization, if any. */
++ tree tid = lookup_template_variable (gen_tmpl, gen_args);
++ tree elt = most_specialized_partial_spec (tid, tf_warning_or_error);
++ if (elt && elt != error_mark_node)
++ {
++ td = TREE_VALUE (elt);
++ args = TREE_PURPOSE (elt);
++ }
++ }
++
+ code_pattern = DECL_TEMPLATE_RESULT (td);
+
+ /* We should never be trying to instantiate a member of a class
+@@ -21733,9 +21732,7 @@
+ outside the class, we may have too many arguments. Drop the
+ ones we don't need. The same is true for specializations. */
+ args = get_innermost_template_args
+- (gen_args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td)));
+- else
+- args = gen_args;
++ (args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td)));
+
+ if (TREE_CODE (d) == FUNCTION_DECL)
+ {
+@@ -21901,7 +21898,7 @@
+
+ /* Regenerate the declaration in case the template has been modified
+ by a subsequent redeclaration. */
+- regenerate_decl_from_template (d, td);
++ regenerate_decl_from_template (d, td, args);
+
+ /* We already set the file and line above. Reset them now in case
+ they changed as a result of calling regenerate_decl_from_template. */
Index: gcc/cp/semantics.c
===================================================================
--- a/src/gcc/cp/semantics.c (.../tags/gcc_6_1_0_release)
@@ -6840,6 +10354,19 @@ Index: gcc/cp/semantics.c
&& TREE_CODE (type) != POINTER_TYPE)
{
error ("linear clause applied to non-integral non-pointer "
+Index: gcc/cp/decl2.c
+===================================================================
+--- a/src/gcc/cp/decl2.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/decl2.c (.../branches/gcc-6-branch)
+@@ -4904,8 +4904,6 @@
+
+ finish_repo ();
+
+- fini_constexpr ();
+-
+ /* The entire file is now complete. If requested, dump everything
+ to a file. */
+ dump_tu ();
Index: gcc/cp/parser.c
===================================================================
--- a/src/gcc/cp/parser.c (.../tags/gcc_6_1_0_release)
@@ -6856,7 +10383,103 @@ Index: gcc/cp/parser.c
id = ansi_opname (op == NEW_EXPR
? VEC_NEW_EXPR : VEC_DELETE_EXPR);
}
-@@ -35396,6 +35397,8 @@
+@@ -21178,7 +21179,7 @@
+ resolution operator, object, function, and enumerator
+ names are ignored. */
+ if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
+- tag_type = typename_type;
++ tag_type = scope_type;
+ /* Look up the name. */
+ decl = cp_parser_lookup_name (parser, identifier,
+ tag_type,
+@@ -24569,6 +24570,20 @@
+
+ /* Support Functions */
+
++/* Return the appropriate prefer_type argument for lookup_name_real based on
++ tag_type. */
++
++static inline int
++prefer_type_arg (tag_types tag_type)
++{
++ switch (tag_type)
++ {
++ case none_type: return 0; // No preference.
++ case scope_type: return 1; // Type or namespace.
++ default: return 2; // Type only.
++ }
++}
++
+ /* Looks up NAME in the current scope, as given by PARSER->SCOPE.
+ NAME should have one of the representations used for an
+ id-expression. If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
+@@ -24705,7 +24720,7 @@
+ errors may be issued. Even if we rollback the current
+ tentative parse, those errors are valid. */
+ decl = lookup_qualified_name (parser->scope, name,
+- tag_type != none_type,
++ prefer_type_arg (tag_type),
+ /*complain=*/true);
+
+ /* 3.4.3.1: In a lookup in which the constructor is an acceptable
+@@ -24726,7 +24741,7 @@
+ && DECL_SELF_REFERENCE_P (decl)
+ && same_type_p (DECL_CONTEXT (decl), parser->scope))
+ decl = lookup_qualified_name (parser->scope, ctor_identifier,
+- tag_type != none_type,
++ prefer_type_arg (tag_type),
+ /*complain=*/true);
+
+ /* If we have a single function from a using decl, pull it out. */
+@@ -24782,7 +24797,7 @@
+ decl = lookup_member (object_type,
+ name,
+ /*protect=*/0,
+- tag_type != none_type,
++ prefer_type_arg (tag_type),
+ tf_warning_or_error);
+ else
+ decl = NULL_TREE;
+@@ -24790,7 +24805,7 @@
+ if (!decl)
+ {
+ /* Look it up in the enclosing context. */
+- decl = lookup_name_real (name, tag_type != none_type,
++ decl = lookup_name_real (name, prefer_type_arg (tag_type),
+ /*nonclass=*/0,
+ /*block_p=*/true, is_namespace, 0);
+ /* DR 141 says when looking for a template-name after -> or ., only
+@@ -24815,7 +24830,7 @@
+ }
+ else
+ {
+- decl = lookup_name_real (name, tag_type != none_type,
++ decl = lookup_name_real (name, prefer_type_arg (tag_type),
+ /*nonclass=*/0,
+ /*block_p=*/true, is_namespace, 0);
+ parser->qualifying_scope = NULL_TREE;
+@@ -33884,7 +33899,9 @@
+
+ strcat (p_name, " for");
+ mask |= OMP_FOR_CLAUSE_MASK;
+- if (cclauses)
++ /* parallel for{, simd} disallows nowait clause, but for
++ target {teams distribute ,}parallel for{, simd} it should be accepted. */
++ if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
+ mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
+ /* Composite distribute parallel for{, simd} disallows ordered clause. */
+ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+@@ -34223,7 +34240,8 @@
+ }
+ }
+
+- clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
++ clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
++ cclauses == NULL);
+ if (cclauses)
+ {
+ cp_omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses);
+@@ -35396,6 +35414,8 @@
cp_parser_oacc_loop (cp_parser *parser, cp_token *pragma_tok, char *p_name,
omp_clause_mask mask, tree *cclauses, bool *if_p)
{
@@ -6865,7 +10488,7 @@ Index: gcc/cp/parser.c
strcat (p_name, " loop");
mask |= OACC_LOOP_CLAUSE_MASK;
-@@ -35403,7 +35406,7 @@
+@@ -35403,7 +35423,7 @@
cclauses == NULL);
if (cclauses)
{
@@ -6874,7 +10497,7 @@ Index: gcc/cp/parser.c
if (*cclauses)
*cclauses = finish_omp_clauses (*cclauses, false);
if (clauses)
-@@ -35496,8 +35499,6 @@
+@@ -35496,8 +35516,6 @@
if (strcmp (p, "loop") == 0)
{
cp_lexer_consume_token (parser->lexer);
@@ -6898,6 +10521,102 @@ Index: gcc/cp/call.c
/* And in C++98 a default constructor can't be explicit. */
&& cxx_dialect >= cxx11)
{
+Index: gcc/cp/lambda.c
+===================================================================
+--- a/src/gcc/cp/lambda.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/lambda.c (.../branches/gcc-6-branch)
+@@ -871,8 +871,10 @@
+ bool nested = (cfun != NULL);
+ bool nested_def = decl_function_context (TYPE_MAIN_DECL (type));
+ tree callop = lambda_function (type);
++ tree lam = CLASSTYPE_LAMBDA_EXPR (type);
+
+- if (LAMBDA_EXPR_CAPTURE_LIST (CLASSTYPE_LAMBDA_EXPR (type)) != NULL_TREE)
++ if (LAMBDA_EXPR_CAPTURE_LIST (lam) != NULL_TREE
++ || LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam) != CPLD_NONE)
+ return;
+
+ if (processing_template_decl)
+Index: gcc/cp/cp-tree.h
+===================================================================
+--- a/src/gcc/cp/cp-tree.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/cp-tree.h (.../branches/gcc-6-branch)
+@@ -4601,7 +4601,8 @@
+ class_type, /* "class" types. */
+ union_type, /* "union" types. */
+ enum_type, /* "enum" types. */
+- typename_type /* "typename" types. */
++ typename_type, /* "typename" types. */
++ scope_type /* namespace or tagged type name followed by :: */
+ };
+
+ /* The various kinds of lvalues we distinguish. */
+Index: gcc/cp/name-lookup.c
+===================================================================
+--- a/src/gcc/cp/name-lookup.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/name-lookup.c (.../branches/gcc-6-branch)
+@@ -4518,9 +4518,11 @@
+ }
+
+ /* Look up NAME (an IDENTIFIER_NODE) in SCOPE (either a NAMESPACE_DECL
+- or a class TYPE). If IS_TYPE_P is TRUE, then ignore non-type
+- bindings.
++ or a class TYPE).
+
++ If PREFER_TYPE is > 0, we only return TYPE_DECLs or namespaces.
++ If PREFER_TYPE is > 1, we only return TYPE_DECLs.
++
+ Returns a DECL (or OVERLOAD, or BASELINK) representing the
+ declaration found. If no suitable declaration can be found,
+ ERROR_MARK_NODE is returned. If COMPLAIN is true and SCOPE is
+@@ -4527,21 +4529,18 @@
+ neither a class-type nor a namespace a diagnostic is issued. */
+
+ tree
+-lookup_qualified_name (tree scope, tree name, bool is_type_p, bool complain,
++lookup_qualified_name (tree scope, tree name, int prefer_type, bool complain,
+ bool find_hidden)
+ {
+- int flags = 0;
+ tree t = NULL_TREE;
+
+- if (find_hidden)
+- flags |= LOOKUP_HIDDEN;
+-
+ if (TREE_CODE (scope) == NAMESPACE_DECL)
+ {
+ struct scope_binding binding = EMPTY_SCOPE_BINDING;
+
+- if (is_type_p)
+- flags |= LOOKUP_PREFER_TYPES;
++ int flags = lookup_flags (prefer_type, /*namespaces_only*/false);
++ if (find_hidden)
++ flags |= LOOKUP_HIDDEN;
+ if (qualified_lookup_using_namespace (name, scope, &binding, flags))
+ t = binding.value;
+ }
+@@ -4548,7 +4547,7 @@
+ else if (cxx_dialect != cxx98 && TREE_CODE (scope) == ENUMERAL_TYPE)
+ t = lookup_enumerator (scope, name);
+ else if (is_class_type (scope, complain))
+- t = lookup_member (scope, name, 2, is_type_p, tf_warning_or_error);
++ t = lookup_member (scope, name, 2, prefer_type, tf_warning_or_error);
+
+ if (!t)
+ return error_mark_node;
+Index: gcc/cp/name-lookup.h
+===================================================================
+--- a/src/gcc/cp/name-lookup.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/cp/name-lookup.h (.../branches/gcc-6-branch)
+@@ -327,7 +327,7 @@
+ extern void set_namespace_binding (tree, tree, tree);
+ extern bool hidden_name_p (tree);
+ extern tree remove_hidden_names (tree);
+-extern tree lookup_qualified_name (tree, tree, bool, bool, /*hidden*/bool = false);
++extern tree lookup_qualified_name (tree, tree, int, bool, /*hidden*/bool = false);
+ extern tree lookup_name_nonclass (tree);
+ extern tree lookup_name_innermost_nonclass_level (tree);
+ extern bool is_local_extern (tree);
Index: gcc/ipa-inline.h
===================================================================
--- a/src/gcc/ipa-inline.h (.../tags/gcc_6_1_0_release)
@@ -6933,6 +10652,42 @@ Index: gcc/dwarf2out.c
&& !native_encode_initializer (val, array + pos, fieldsize))
return false;
}
+@@ -19401,11 +19401,13 @@
+ static void
+ retry_incomplete_types (void)
+ {
++ set_early_dwarf s;
+ int i;
+
+ for (i = vec_safe_length (incomplete_types) - 1; i >= 0; i--)
+ if (should_emit_struct_debug ((*incomplete_types)[i], DINFO_USAGE_DIR_USE))
+ gen_type_die ((*incomplete_types)[i], comp_unit_die ());
++ vec_safe_truncate (incomplete_types, 0);
+ }
+
+ /* Determine what tag to use for a record type. */
+@@ -27382,10 +27384,6 @@
+ resolve_addr (comp_unit_die ());
+ move_marked_base_types ();
+
+- /* Walk through the list of incomplete types again, trying once more to
+- emit full debugging info for them. */
+- retry_incomplete_types ();
+-
+ if (flag_eliminate_unused_debug_types)
+ prune_unused_types ();
+
+@@ -27686,6 +27684,10 @@
+ static void
+ dwarf2out_early_finish (void)
+ {
++ /* Walk through the list of incomplete types again, trying once more to
++ emit full debugging info for them. */
++ retry_incomplete_types ();
++
+ /* The point here is to flush out the limbo list so that it is empty
+ and we don't need to stream it for LTO. */
+ flush_limbo_die_list ();
Index: gcc/ada/s-osinte-gnu.ads
===================================================================
--- a/src/gcc/ada/s-osinte-gnu.ads (.../tags/gcc_6_1_0_release)
@@ -6951,6 +10706,41 @@ Index: gcc/ada/s-osinte-gnu.ads
-- Returns the size of a page
-- From /usr/include/i386-gnu/bits/mman.h
+Index: gcc/ada/s-osinte-kfreebsd-gnu.ads
+===================================================================
+--- a/src/gcc/ada/s-osinte-kfreebsd-gnu.ads (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/ada/s-osinte-kfreebsd-gnu.ads (.../branches/gcc-6-branch)
+@@ -7,7 +7,7 @@
+ -- S p e c --
+ -- --
+ -- Copyright (C) 1991-1994, Florida State University --
+--- Copyright (C) 1995-2015, Free Software Foundation, Inc. --
++-- Copyright (C) 1995-2016, Free Software Foundation, Inc. --
+ -- --
+ -- GNAT is free software; you can redistribute it and/or modify it under --
+ -- terms of the GNU General Public License as published by the Free Soft- --
+@@ -216,6 +216,11 @@
+ return int;
+ pragma Import (C, clock_gettime, "clock_gettime");
+
++ function clock_getres
++ (clock_id : clockid_t;
++ res : access timespec) return int;
++ pragma Import (C, clock_getres, "clock_getres");
++
+ function To_Duration (TS : timespec) return Duration;
+ pragma Inline (To_Duration);
+
+@@ -330,8 +335,7 @@
+ -- returns the stack base of the specified thread. Only call this function
+ -- when Stack_Base_Available is True.
+
+- function Get_Page_Size return size_t;
+- function Get_Page_Size return Address;
++ function Get_Page_Size return int;
+ pragma Import (C, Get_Page_Size, "getpagesize");
+ -- Returns the size of a page
+
Index: gcc/ada/a-textio.adb
===================================================================
--- a/src/gcc/ada/a-textio.adb (.../tags/gcc_6_1_0_release)
@@ -7001,6 +10791,132 @@ Index: gcc/ada/system-linux-armeb.ads
type Name is (SYSTEM_NAME_GNAT);
System_Name : constant Name := SYSTEM_NAME_GNAT;
+Index: gcc/ada/g-comlin.adb
+===================================================================
+--- a/src/gcc/ada/g-comlin.adb (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/ada/g-comlin.adb (.../branches/gcc-6-branch)
+@@ -6,7 +6,7 @@
+ -- --
+ -- B o d y --
+ -- --
+--- Copyright (C) 1999-2015, Free Software Foundation, Inc. --
++-- Copyright (C) 1999-2016, Free Software Foundation, Inc. --
+ -- --
+ -- GNAT is free software; you can redistribute it and/or modify it under --
+ -- terms of the GNU General Public License as published by the Free Soft- --
+@@ -3151,16 +3151,18 @@
+
+ New_Line;
+
+- if Section /= "" then
++ if Section /= "" and then Config.Switches /= null then
+ Put_Line ("Switches after " & Section);
+ end if;
+
+ -- Compute size of the switches column
+
+- for S in Config.Switches'Range loop
+- Max_Len := Natural'Max
+- (Max_Len, Switch_Name (Config.Switches (S), Section)'Length);
+- end loop;
++ if Config.Switches /= null then
++ for S in Config.Switches'Range loop
++ Max_Len := Natural'Max
++ (Max_Len, Switch_Name (Config.Switches (S), Section)'Length);
++ end loop;
++ end if;
+
+ if Config.Aliases /= null then
+ for A in Config.Aliases'Range loop
+@@ -3173,26 +3175,28 @@
+
+ -- Display the switches
+
+- for S in Config.Switches'Range loop
+- declare
+- N : constant String :=
+- Switch_Name (Config.Switches (S), Section);
++ if Config.Switches /= null then
++ for S in Config.Switches'Range loop
++ declare
++ N : constant String :=
++ Switch_Name (Config.Switches (S), Section);
+
+- begin
+- if N /= "" then
+- Put (" ");
+- Put (N);
+- Put ((1 .. Max_Len - N'Length + 1 => ' '));
++ begin
++ if N /= "" then
++ Put (" ");
++ Put (N);
++ Put ((1 .. Max_Len - N'Length + 1 => ' '));
+
+- if Config.Switches (S).Help /= null then
+- Put (Config.Switches (S).Help.all);
++ if Config.Switches (S).Help /= null then
++ Put (Config.Switches (S).Help.all);
++ end if;
++
++ New_Line;
+ end if;
++ end;
++ end loop;
++ end if;
+
+- New_Line;
+- end if;
+- end;
+- end loop;
+-
+ -- Display the aliases
+
+ if Config.Aliases /= null then
+@@ -3454,25 +3458,27 @@
+
+ -- Initialize output values for automatically handled switches
+
+- for S in Config.Switches'Range loop
+- case Config.Switches (S).Typ is
+- when Switch_Untyped =>
+- null; -- Nothing to do
++ if Config.Switches /= null then
++ for S in Config.Switches'Range loop
++ case Config.Switches (S).Typ is
++ when Switch_Untyped =>
++ null; -- Nothing to do
+
+- when Switch_Boolean =>
+- Config.Switches (S).Boolean_Output.all :=
+- not Config.Switches (S).Boolean_Value;
++ when Switch_Boolean =>
++ Config.Switches (S).Boolean_Output.all :=
++ not Config.Switches (S).Boolean_Value;
+
+- when Switch_Integer =>
+- Config.Switches (S).Integer_Output.all :=
+- Config.Switches (S).Integer_Initial;
++ when Switch_Integer =>
++ Config.Switches (S).Integer_Output.all :=
++ Config.Switches (S).Integer_Initial;
+
+- when Switch_String =>
+- if Config.Switches (S).String_Output.all = null then
+- Config.Switches (S).String_Output.all := new String'("");
+- end if;
+- end case;
+- end loop;
++ when Switch_String =>
++ if Config.Switches (S).String_Output.all = null then
++ Config.Switches (S).String_Output.all := new String'("");
++ end if;
++ end case;
++ end loop;
++ end if;
+
+ -- For all sections, and all switches within those sections
+
Index: gcc/ada/system-linux-sparcv9.ads
===================================================================
--- a/src/gcc/ada/system-linux-sparcv9.ads (.../tags/gcc_6_1_0_release)
@@ -7028,7 +10944,25 @@ Index: gcc/ada/ChangeLog
===================================================================
--- a/src/gcc/ada/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/ada/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,49 @@
+@@ -1,3 +1,67 @@
++2016-06-01 Simon Wright <simon at pushface.org>
++
++ PR ada/71358
++ * g-comlin.adb (Display_Section_Help): Do not dereference
++ Config.Switches if it's null.
++ (Getopt): Likewise.
++
++2016-05-31 Eric Botcazou <ebotcazou at adacore.com>
++
++ * s-osinte-kfreebsd-gnu.ads (clock_getres): Define.
++ (Get_Page_Size): Remove duplicate and return int.
++
++2016-05-31 Jan Sommer <soja-lists at aries.uberspace.de>
++
++ PR ada/71317
++ * s-osinte-rtems.ads (clock_getres): Define.
++ (Get_Page_Size): Remove duplicate and return int.
++
+2016-05-06 Eric Botcazou <ebotcazou at adacore.com>
+
+ PR ada/70969
@@ -7078,6 +11012,41 @@ Index: gcc/ada/ChangeLog
2016-04-27 Release Manager
* GCC 6.1.0 released.
+Index: gcc/ada/s-osinte-rtems.ads
+===================================================================
+--- a/src/gcc/ada/s-osinte-rtems.ads (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/ada/s-osinte-rtems.ads (.../branches/gcc-6-branch)
+@@ -6,7 +6,7 @@
+ -- --
+ -- S p e c --
+ -- --
+--- Copyright (C) 1997-2011 Free Software Foundation, Inc. --
++-- Copyright (C) 1997-2016 Free Software Foundation, Inc. --
+ -- --
+ -- GNARL is free software; you can redistribute it and/or modify it under --
+ -- terms of the GNU General Public License as published by the Free Soft- --
+@@ -188,6 +188,11 @@
+ tp : access timespec) return int;
+ pragma Import (C, clock_gettime, "clock_gettime");
+
++ function clock_getres
++ (clock_id : clockid_t;
++ res : access timespec) return int;
++ pragma Import (C, clock_getres, "clock_getres");
++
+ function To_Duration (TS : timespec) return Duration;
+ pragma Inline (To_Duration);
+
+@@ -291,8 +296,7 @@
+ -- These two functions are only needed to share s-taprop.adb with
+ -- FSU threads.
+
+- function Get_Page_Size return size_t;
+- function Get_Page_Size return Address;
++ function Get_Page_Size return int;
+ pragma Import (C, Get_Page_Size, "getpagesize");
+ -- Returns the size of a page
+
Index: gcc/ada/sem_ch6.adb
===================================================================
--- a/src/gcc/ada/sem_ch6.adb (.../tags/gcc_6_1_0_release)
@@ -8605,7 +12574,60 @@ Index: gcc/fortran/decl.c
goto ok;
gfc_error ("Derived type at %C has not been previously defined "
-@@ -5791,6 +6071,10 @@
+@@ -4698,12 +4978,51 @@
+ static bool
+ copy_prefix (symbol_attribute *dest, locus *where)
+ {
+- if (current_attr.pure && !gfc_add_pure (dest, where))
++ if (dest->module_procedure)
++ {
++ if (current_attr.elemental)
++ dest->elemental = 1;
++
++ if (current_attr.pure)
++ dest->pure = 1;
++
++ if (current_attr.recursive)
++ dest->recursive = 1;
++
++ /* Module procedures are unusual in that the 'dest' is copied from
++ the interface declaration. However, this is an oportunity to
++ check that the submodule declaration is compliant with the
++ interface. */
++ if (dest->elemental && !current_attr.elemental)
++ {
++ gfc_error ("ELEMENTAL prefix in MODULE PROCEDURE interface is "
++ "missing at %L", where);
+ return false;
++ }
+
++ if (dest->pure && !current_attr.pure)
++ {
++ gfc_error ("PURE prefix in MODULE PROCEDURE interface is "
++ "missing at %L", where);
++ return false;
++ }
++
++ if (dest->recursive && !current_attr.recursive)
++ {
++ gfc_error ("RECURSIVE prefix in MODULE PROCEDURE interface is "
++ "missing at %L", where);
++ return false;
++ }
++
++ return true;
++ }
++
+ if (current_attr.elemental && !gfc_add_elemental (dest, where))
+ return false;
+
++ if (current_attr.pure && !gfc_add_pure (dest, where))
++ return false;
++
+ if (current_attr.recursive && !gfc_add_recursive (dest, where))
+ return false;
+
+@@ -5791,6 +6110,10 @@
gfc_error ("ENTRY statement at %C cannot appear within "
"an INTERFACE");
break;
@@ -8616,7 +12638,7 @@ Index: gcc/fortran/decl.c
case COMP_DERIVED:
gfc_error ("ENTRY statement at %C cannot appear within "
"a DERIVED TYPE block");
-@@ -6450,6 +6734,24 @@
+@@ -6450,6 +6773,24 @@
eos_ok = 0;
break;
@@ -8641,7 +12663,7 @@ Index: gcc/fortran/decl.c
case COMP_DERIVED:
case COMP_DERIVED_CONTAINS:
*st = ST_END_TYPE;
-@@ -8020,6 +8322,208 @@
+@@ -8020,6 +8361,208 @@
}
@@ -8850,7 +12872,7 @@ Index: gcc/fortran/decl.c
/* Match the beginning of a derived type declaration. If a type name
was the result of a function, then it is possible to have a symbol
already to be known as a derived type yet have no components. */
-@@ -8037,7 +8541,7 @@
+@@ -8037,7 +8580,7 @@
bool seen_attr = false;
gfc_interface *intr = NULL, *head;
@@ -8859,7 +12881,7 @@ Index: gcc/fortran/decl.c
return MATCH_NO;
name[0] = '\0';
-@@ -8111,9 +8615,7 @@
+@@ -8111,9 +8654,7 @@
if (!sym)
{
/* Use upper case to save the actual derived-type symbol. */
@@ -8999,7 +13021,23 @@ Index: gcc/fortran/ChangeLog
===================================================================
--- a/src/gcc/fortran/ChangeLog (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/fortran/ChangeLog (.../branches/gcc-6-branch)
-@@ -1,3 +1,126 @@
+@@ -1,3 +1,142 @@
++2016-06-01 Paul Thomas <pault at gcc.gnu.org>
++
++ PR fortran/71156
++ * decl.c (copy_prefix): Add checks that the module procedure
++ declaration prefixes are compliant with the interface. Invert
++ order of existing elemental and pure checks.
++ * resolve.c (resolve_fl_procedure): Invert order of elemental
++ and pure errors.
++
++2016-06-01 Jakub Jelinek <jakub at redhat.com>
++
++ * parse.c (case_decl): Move ST_OMP_* to ...
++ (case_omp_decl): ... here, new macro.
++ (verify_st_order): For case_omp_decl, complain about
++ p->state >= ORDER_EXEC, but don't change p->state otherwise.
++
+2016-05-26 Jerry DeLisle <jvdelisle at gcc.gnu.org>
+
+ Backport from trunk.
@@ -9019,7 +13057,7 @@ Index: gcc/fortran/ChangeLog
+ * interface.c (compare_parameter): Check for non-NULL pointer.
+
+2016-05-14 Fritz Reese <fritzoreese at gmail.com>
-+
++
+ Backport from trunk: r236242
+ * gfortran.texi: Update example of DEC UNION extension.
+
@@ -9703,6 +13741,28 @@ Index: gcc/fortran/resolve.c
{
gfc_error ("The type %qs cannot be host associated at %L "
"because it is blocked by an incompatible object "
+@@ -11959,17 +11965,17 @@
+ goto check_formal;
+
+ /* Check the procedure characteristics. */
+- if (sym->attr.pure != iface->attr.pure)
++ if (sym->attr.elemental != iface->attr.elemental)
+ {
+- gfc_error ("Mismatch in PURE attribute between MODULE "
++ gfc_error ("Mismatch in ELEMENTAL attribute between MODULE "
+ "PROCEDURE at %L and its interface in %s",
+ &sym->declared_at, module_name);
+ return false;
+ }
+
+- if (sym->attr.elemental != iface->attr.elemental)
++ if (sym->attr.pure != iface->attr.pure)
+ {
+- gfc_error ("Mismatch in ELEMENTAL attribute between MODULE "
++ gfc_error ("Mismatch in PURE attribute between MODULE "
+ "PROCEDURE at %L and its interface in %s",
+ &sym->declared_at, module_name);
+ return false;
@@ -12733,7 +12739,8 @@
}
@@ -10944,7 +15004,25 @@ Index: gcc/fortran/parse.c
match ("unlock", gfc_match_unlock, ST_UNLOCK);
break;
-@@ -1642,6 +1646,15 @@
+@@ -1386,10 +1390,14 @@
+
+ #define case_decl case ST_ATTR_DECL: case ST_COMMON: case ST_DATA_DECL: \
+ case ST_EQUIVALENCE: case ST_NAMELIST: case ST_STATEMENT_FUNCTION: \
+- case ST_TYPE: case ST_INTERFACE: case ST_OMP_THREADPRIVATE: \
+- case ST_PROCEDURE: case ST_OMP_DECLARE_SIMD: case ST_OMP_DECLARE_REDUCTION: \
+- case ST_OMP_DECLARE_TARGET: case ST_OACC_ROUTINE: case ST_OACC_DECLARE
++ case ST_TYPE: case ST_INTERFACE: case ST_PROCEDURE: case ST_OACC_ROUTINE: \
++ case ST_OACC_DECLARE
+
++/* OpenMP declaration statements. */
++
++#define case_omp_decl case ST_OMP_THREADPRIVATE: case ST_OMP_DECLARE_SIMD: \
++ case ST_OMP_DECLARE_TARGET: case ST_OMP_DECLARE_REDUCTION
++
+ /* Block end statements. Errors associated with interchanging these
+ are detected in gfc_match_end(). */
+
+@@ -1642,6 +1650,15 @@
case ST_DEALLOCATE:
p = "DEALLOCATE";
break;
@@ -10960,7 +15038,7 @@ Index: gcc/fortran/parse.c
case ST_DERIVED_DECL:
p = _("derived type declaration");
break;
-@@ -1711,6 +1724,15 @@
+@@ -1711,6 +1728,15 @@
case ST_END_WHERE:
p = "END WHERE";
break;
@@ -10976,7 +15054,7 @@ Index: gcc/fortran/parse.c
case ST_END_TYPE:
p = "END TYPE";
break;
-@@ -2457,6 +2479,7 @@
+@@ -2457,6 +2483,7 @@
case ST_PUBLIC:
case ST_PRIVATE:
@@ -10984,7 +15062,22 @@ Index: gcc/fortran/parse.c
case ST_DERIVED_DECL:
case_decl:
if (p->state >= ORDER_EXEC)
-@@ -2646,6 +2669,358 @@
+@@ -2465,6 +2492,14 @@
+ p->state = ORDER_SPEC;
+ break;
+
++ case_omp_decl:
++ /* The OpenMP directives have to be somewhere in the specification
++ part, but there are no further requirements on their ordering.
++ Thus don't adjust p->state, just ignore them. */
++ if (p->state >= ORDER_EXEC)
++ goto order;
++ break;
++
+ case_executable:
+ case_exec_markers:
+ if (p->state < ORDER_EXEC)
+@@ -2646,6 +2681,358 @@
}
@@ -11343,7 +15436,7 @@ Index: gcc/fortran/parse.c
/* Parse a derived type. */
static void
-@@ -2762,171 +3137,8 @@
+@@ -2762,171 +3149,8 @@
*/
sym = gfc_current_block ();
for (c = sym->components; c; c = c->next)
@@ -11516,15 +15609,18 @@ Index: gcc/fortran/parse.c
if (!seen_component)
sym->attr.zero_comp = 1;
-@@ -3348,6 +3560,7 @@
+@@ -3348,8 +3572,10 @@
case ST_PARAMETER:
case ST_PUBLIC:
case ST_PRIVATE:
+ case ST_STRUCTURE_DECL:
case ST_DERIVED_DECL:
case_decl:
++ case_omp_decl:
declSt:
-@@ -3364,6 +3577,10 @@
+ if (!verify_st_order (&ss, st, false))
+ {
+@@ -3364,6 +3590,10 @@
parse_interface ();
break;
@@ -11966,11 +16062,36 @@ Index: gcc/stor-layout.h
extern unsigned int update_alignment_for_field (record_layout_info, tree,
unsigned int);
extern record_layout_info start_record_layout (tree);
+Index: gcc/alias.c
+===================================================================
+--- a/src/gcc/alias.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/alias.c (.../branches/gcc-6-branch)
+@@ -769,6 +769,10 @@
+ tree
+ reference_alias_ptr_type (tree t)
+ {
++ /* If the frontend assigns this alias-set zero, preserve that. */
++ if (lang_hooks.get_alias_set (t) == 0)
++ return ptr_type_node;
++
+ tree ptype = reference_alias_ptr_type_1 (&t);
+ /* If there is a given pointer type for aliasing purposes, return it. */
+ if (ptype != NULL_TREE)
Index: gcc/tree-vect-loop.c
===================================================================
--- a/src/gcc/tree-vect-loop.c (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/tree-vect-loop.c (.../branches/gcc-6-branch)
-@@ -437,7 +437,9 @@
+@@ -216,7 +216,8 @@
+
+ gcc_assert (stmt_info);
+
+- if (STMT_VINFO_RELEVANT_P (stmt_info))
++ if (STMT_VINFO_RELEVANT_P (stmt_info)
++ || STMT_VINFO_LIVE_P (stmt_info))
+ {
+ gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
+ scalar_type = TREE_TYPE (PHI_RESULT (phi));
+@@ -437,9 +438,12 @@
/* Bool ops don't participate in vectorization factor
computation. For comparison use compared types to
compute a factor. */
@@ -11979,8 +16100,12 @@ Index: gcc/tree-vect-loop.c
+ && is_gimple_assign (stmt)
+ && gimple_assign_rhs_code (stmt) != COND_EXPR)
{
- if (STMT_VINFO_RELEVANT_P (stmt_info))
+- if (STMT_VINFO_RELEVANT_P (stmt_info))
++ if (STMT_VINFO_RELEVANT_P (stmt_info)
++ || STMT_VINFO_LIVE_P (stmt_info))
mask_producers.safe_push (stmt_info);
+ bool_result = true;
+
Index: gcc/ipa-inline-analysis.c
===================================================================
--- a/src/gcc/ipa-inline-analysis.c (.../tags/gcc_6_1_0_release)
@@ -12397,6 +16522,19 @@ Index: gcc/gimplify.c
if (outer)
{
n = splay_tree_lookup (outer->variables,
+@@ -8946,7 +8952,12 @@
+ || (ort == ORT_SIMD
+ && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1))
+ {
++ struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
++ /* Make sure omp_add_variable is not called on it prematurely.
++ We call it ourselves a few lines later. */
++ gimplify_omp_ctxp = NULL;
+ var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
++ gimplify_omp_ctxp = ctx;
+ TREE_OPERAND (t, 0) = var;
+
+ gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
Index: gcc/graphite-scop-detection.c
===================================================================
--- a/src/gcc/graphite-scop-detection.c (.../tags/gcc_6_1_0_release)
@@ -793170,6 +797308,27 @@ Index: gcc/hsa-gen.c
make_edge (e->src, default_label_bb, EDGE_FALSE_VALUE);
hsa_cfun->m_modified_cfg = true;
+Index: gcc/config.gcc
+===================================================================
+--- a/src/gcc/config.gcc (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config.gcc (.../branches/gcc-6-branch)
+@@ -1058,13 +1058,11 @@
+ case $target in
+ armv6*-*-freebsd*)
+ tm_defines="${tm_defines} TARGET_FREEBSD_ARMv6=1"
++ if test $fbsd_major -ge 11; then
++ tm_defines="${tm_defines} TARGET_FREEBSD_ARM_HARD_FLOAT=1"
++ fi
+ ;;
+ esac
+- case $target in
+- arm*hf-*-freebsd*)
+- tm_defines="${tm_defines} TARGET_FREEBSD_ARM_HARD_FLOAT=1"
+- ;;
+- esac
+ with_tls=${with_tls:-gnu}
+ ;;
+ arm*-*-netbsdelf*)
Index: gcc/tree-ssa-structalias.c
===================================================================
--- a/src/gcc/tree-ssa-structalias.c (.../tags/gcc_6_1_0_release)
@@ -793670,11 +797829,364 @@ Index: gcc/config/aarch64/aarch64-simd.md
;; FP Max/Min
;; Max/Min are introduced by idiom recognition by GCC's mid-end. An
;; expression like:
+Index: gcc/config/rs6000/vector.md
+===================================================================
+--- a/src/gcc/config/rs6000/vector.md (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/vector.md (.../branches/gcc-6-branch)
+@@ -26,6 +26,13 @@
+ ;; Vector int modes
+ (define_mode_iterator VEC_I [V16QI V8HI V4SI V2DI])
+
++;; Vector int modes for parity
++(define_mode_iterator VEC_IP [V8HI
++ V4SI
++ V2DI
++ V1TI
++ (TI "TARGET_VSX_TIMODE")])
++
+ ;; Vector float modes
+ (define_mode_iterator VEC_F [V4SF V2DF])
+
+@@ -738,6 +745,12 @@
+ (clz:VEC_I (match_operand:VEC_I 1 "register_operand" "")))]
+ "TARGET_P8_VECTOR")
+
++;; Vector count trailing zeros
++(define_expand "ctz<mode>2"
++ [(set (match_operand:VEC_I 0 "register_operand" "")
++ (ctz:VEC_I (match_operand:VEC_I 1 "register_operand" "")))]
++ "TARGET_P9_VECTOR")
++
+ ;; Vector population count
+ (define_expand "popcount<mode>2"
+ [(set (match_operand:VEC_I 0 "register_operand" "")
+@@ -744,6 +757,12 @@
+ (popcount:VEC_I (match_operand:VEC_I 1 "register_operand" "")))]
+ "TARGET_P8_VECTOR")
+
++;; Vector parity
++(define_expand "parity<mode>2"
++ [(set (match_operand:VEC_IP 0 "register_operand" "")
++ (parity:VEC_IP (match_operand:VEC_IP 1 "register_operand" "")))]
++ "TARGET_P9_VECTOR")
++
+
+ ;; Same size conversions
+ (define_expand "float<VEC_int><mode>2"
+Index: gcc/config/rs6000/constraints.md
+===================================================================
+--- a/src/gcc/config/rs6000/constraints.md (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/constraints.md (.../branches/gcc-6-branch)
+@@ -156,6 +156,11 @@
+ (and (match_test "TARGET_DIRECT_MOVE_128")
+ (match_test "(ival == VECTOR_ELEMENT_MFVSRLD_64BIT)"))))
+
++;; ISA 3.0 vector d-form addresses
++(define_memory_constraint "wO"
++ "Memory operand suitable for the ISA 3.0 vector d-form instructions."
++ (match_operand 0 "vsx_quad_dform_memory_operand"))
++
+ ;; Lq/stq validates the address for load/store quad
+ (define_memory_constraint "wQ"
+ "Memory operand suitable for the load/store quad instructions"
+Index: gcc/config/rs6000/predicates.md
+===================================================================
+--- a/src/gcc/config/rs6000/predicates.md (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/predicates.md (.../branches/gcc-6-branch)
+@@ -698,48 +698,25 @@
+ (define_predicate "quad_memory_operand"
+ (match_code "mem")
+ {
+- rtx addr, op0, op1;
+- int ret;
+-
+ if (!TARGET_QUAD_MEMORY && !TARGET_SYNC_TI)
+- ret = 0;
++ return false;
+
+- else if (!memory_operand (op, mode))
+- ret = 0;
++ if (GET_MODE_SIZE (mode) != 16 || !MEM_P (op) || MEM_ALIGN (op) < 128)
++ return false;
+
+- else if (GET_MODE_SIZE (GET_MODE (op)) != 16)
+- ret = 0;
++ return quad_address_p (XEXP (op, 0), mode, true);
++})
+
+- else if (MEM_ALIGN (op) < 128)
+- ret = 0;
++;; Return 1 if the operand is suitable for load/store to vector registers with
++;; d-form addressing (register+offset), which was added in ISA 3.0.
++;; Unlike quad_memory_operand, we do not have to check for alignment.
++(define_predicate "vsx_quad_dform_memory_operand"
++ (match_code "mem")
++{
++ if (!TARGET_P9_DFORM_VECTOR || !MEM_P (op) || GET_MODE_SIZE (mode) != 16)
++ return false;
+
+- else
+- {
+- addr = XEXP (op, 0);
+- if (int_reg_operand (addr, Pmode))
+- ret = 1;
+-
+- else if (GET_CODE (addr) != PLUS)
+- ret = 0;
+-
+- else
+- {
+- op0 = XEXP (addr, 0);
+- op1 = XEXP (addr, 1);
+- ret = (int_reg_operand (op0, Pmode)
+- && GET_CODE (op1) == CONST_INT
+- && IN_RANGE (INTVAL (op1), -32768, 32767)
+- && (INTVAL (op1) & 15) == 0);
+- }
+- }
+-
+- if (TARGET_DEBUG_ADDR)
+- {
+- fprintf (stderr, "\nquad_memory_operand, ret = %s\n", ret ? "true" : "false");
+- debug_rtx (op);
+- }
+-
+- return ret;
++ return quad_address_p (XEXP (op, 0), mode, false);
+ })
+
+ ;; Return 1 if the operand is an indexed or indirect memory operand.
+Index: gcc/config/rs6000/rs6000-cpus.def
+===================================================================
+--- a/src/gcc/config/rs6000/rs6000-cpus.def (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/rs6000-cpus.def (.../branches/gcc-6-branch)
+@@ -60,13 +60,14 @@
+ | OPTION_MASK_UPPER_REGS_SF)
+
+ /* Add ISEL back into ISA 3.0, since it is supposed to be a win. Do not add
+- P9_DFORM or P9_MINMAX until they are fully debugged. */
++ P9_MINMAX until the hardware that supports it is available. Do not add
++ P9_DFORM_VECTOR until LRA is the default register allocator. */
+ #define ISA_3_0_MASKS_SERVER (ISA_2_7_MASKS_SERVER \
+ | OPTION_MASK_FLOAT128_HW \
+ | OPTION_MASK_ISEL \
+ | OPTION_MASK_MODULO \
+ | OPTION_MASK_P9_FUSION \
+- | OPTION_MASK_P9_DFORM \
++ | OPTION_MASK_P9_DFORM_SCALAR \
+ | OPTION_MASK_P9_VECTOR)
+
+ #define POWERPC_7400_MASK (OPTION_MASK_PPC_GFXOPT | OPTION_MASK_ALTIVEC)
+@@ -94,6 +95,7 @@
+ | OPTION_MASK_FPRND \
+ | OPTION_MASK_HTM \
+ | OPTION_MASK_ISEL \
++ | OPTION_MASK_LRA \
+ | OPTION_MASK_MFCRF \
+ | OPTION_MASK_MFPGPR \
+ | OPTION_MASK_MODULO \
+@@ -101,7 +103,8 @@
+ | OPTION_MASK_NO_UPDATE \
+ | OPTION_MASK_P8_FUSION \
+ | OPTION_MASK_P8_VECTOR \
+- | OPTION_MASK_P9_DFORM \
++ | OPTION_MASK_P9_DFORM_SCALAR \
++ | OPTION_MASK_P9_DFORM_VECTOR \
+ | OPTION_MASK_P9_FUSION \
+ | OPTION_MASK_P9_MINMAX \
+ | OPTION_MASK_P9_VECTOR \
+Index: gcc/config/rs6000/rs6000-protos.h
+===================================================================
+--- a/src/gcc/config/rs6000/rs6000-protos.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/rs6000-protos.h (.../branches/gcc-6-branch)
+@@ -86,6 +86,7 @@
+ extern int mems_ok_for_quad_peep (rtx, rtx);
+ extern bool gpr_or_gpr_p (rtx, rtx);
+ extern bool direct_move_p (rtx, rtx);
++extern bool quad_address_p (rtx, machine_mode, bool);
+ extern bool quad_load_store_p (rtx, rtx);
+ extern bool fusion_gpr_load_p (rtx, rtx, rtx, rtx);
+ extern void expand_fusion_gpr_load (rtx *);
Index: gcc/config/rs6000/rs6000-builtin.def
===================================================================
--- a/src/gcc/config/rs6000/rs6000-builtin.def (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/config/rs6000/rs6000-builtin.def (.../branches/gcc-6-branch)
-@@ -1391,13 +1391,13 @@
+@@ -24,6 +24,7 @@
+ <http://www.gnu.org/licenses/>. */
+
+ /* Before including this file, some macros must be defined:
++ RS6000_BUILTIN_0 -- 0 arg builtins
+ RS6000_BUILTIN_1 -- 1 arg builtins
+ RS6000_BUILTIN_2 -- 2 arg builtins
+ RS6000_BUILTIN_3 -- 3 arg builtins
+@@ -43,6 +44,10 @@
+ ATTR builtin attribute information.
+ ICODE Insn code of the function that implents the builtin. */
+
++#ifndef RS6000_BUILTIN_0
++ #error "RS6000_BUILTIN_0 is not defined."
++#endif
++
+ #ifndef RS6000_BUILTIN_1
+ #error "RS6000_BUILTIN_1 is not defined."
+ #endif
+@@ -637,6 +642,41 @@
+ | RS6000_BTC_TERNARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers. */
++#define BU_P9_MISC_1(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_1 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_UNARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers,
++ and they require 64-bit addressing. */
++#define BU_P9_64BIT_MISC_0(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_0 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO \
++ | RS6000_BTM_64BIT, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_SPECIAL), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers. */
++#define BU_P9_MISC_0(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_0 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_SPECIAL), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
+ /* 128-bit long double floating point builtins. */
+ #define BU_LDBL128_2(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_2 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
+@@ -647,8 +687,113 @@
+ | RS6000_BTC_BINARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
++
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers. */
++#define BU_P9_MISC_1(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_1 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_UNARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers,
++ and they require 64-bit addressing. */
++#define BU_P9_64BIT_MISC_0(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_0 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO \
++ | RS6000_BTM_64BIT, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_SPECIAL), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* Miscellaneous builtins for instructions added in ISA 3.0. These
++ instructions don't require either the DFP or VSX options, just the basic
++ ISA 3.0 enablement since they operate on general purpose registers. */
++#define BU_P9_MISC_0(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_0 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_" NAME, /* NAME */ \
++ RS6000_BTM_MODULO, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_SPECIAL), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* ISA 3.0 (power9) vector convenience macros. */
++/* For the instructions that are encoded as altivec instructions use
++ __builtin_altivec_ as the builtin name. */
++#define BU_P9V_AV_1(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_1 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_altivec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_UNARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++#define BU_P9V_AV_2(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_2 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_altivec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_BINARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++#define BU_P9V_AV_3(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_3 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_altivec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_TERNARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++#define BU_P9V_AV_P(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_P (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_altivec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_PREDICATE), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++/* For the instructions encoded as VSX instructions use __builtin_vsx as the
++ builtin name. */
++#define BU_P9V_VSX_1(ENUM, NAME, ATTR, ICODE) \
++ RS6000_BUILTIN_1 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
++ "__builtin_vsx_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_ ## ATTR /* ATTR */ \
++ | RS6000_BTC_UNARY), \
++ CODE_FOR_ ## ICODE) /* ICODE */
++
++#define BU_P9V_OVERLOAD_1(ENUM, NAME) \
++ RS6000_BUILTIN_1 (P9V_BUILTIN_VEC_ ## ENUM, /* ENUM */ \
++ "__builtin_vec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_OVERLOADED /* ATTR */ \
++ | RS6000_BTC_UNARY), \
++ CODE_FOR_nothing) /* ICODE */
++
++#define BU_P9V_OVERLOAD_2(ENUM, NAME) \
++ RS6000_BUILTIN_2 (P9V_BUILTIN_VEC_ ## ENUM, /* ENUM */ \
++ "__builtin_vec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_OVERLOADED /* ATTR */ \
++ | RS6000_BTC_BINARY), \
++ CODE_FOR_nothing) /* ICODE */
++
++#define BU_P9V_OVERLOAD_3(ENUM, NAME) \
++ RS6000_BUILTIN_3 (P9V_BUILTIN_VEC_ ## ENUM, /* ENUM */ \
++ "__builtin_vec_" NAME, /* NAME */ \
++ RS6000_BTM_P9_VECTOR, /* MASK */ \
++ (RS6000_BTC_OVERLOADED /* ATTR */ \
++ | RS6000_BTC_TERNARY), \
++ CODE_FOR_nothing) /* ICODE */
+ #endif
+
++
+ /* Insure 0 is not a legitimate index. */
+ BU_SPECIAL_X (RS6000_BUILTIN_NONE, NULL, 0, RS6000_BTC_MISC)
+
+@@ -1391,13 +1536,25 @@
BU_VSX_X (LXVW4X_V8HI, "lxvw4x_v8hi", MEM)
BU_VSX_X (LXVW4X_V16QI, "lxvw4x_v16qi", MEM)
BU_VSX_X (STXSDX, "stxsdx", MEM)
@@ -793692,14 +798204,1652 @@ Index: gcc/config/rs6000/rs6000-builtin.def
+BU_VSX_X (STXVW4X_V4SI, "stxvw4x_v4si", MEM)
+BU_VSX_X (STXVW4X_V8HI, "stxvw4x_v8hi", MEM)
+BU_VSX_X (STXVW4X_V16QI, "stxvw4x_v16qi", MEM)
++BU_VSX_X (LD_ELEMREV_V2DF, "ld_elemrev_v2df", MEM)
++BU_VSX_X (LD_ELEMREV_V2DI, "ld_elemrev_v2di", MEM)
++BU_VSX_X (LD_ELEMREV_V4SF, "ld_elemrev_v4sf", MEM)
++BU_VSX_X (LD_ELEMREV_V4SI, "ld_elemrev_v4si", MEM)
++BU_VSX_X (LD_ELEMREV_V8HI, "ld_elemrev_v8hi", MEM)
++BU_VSX_X (LD_ELEMREV_V16QI, "ld_elemrev_v16qi", MEM)
++BU_VSX_X (ST_ELEMREV_V2DF, "st_elemrev_v2df", MEM)
++BU_VSX_X (ST_ELEMREV_V2DI, "st_elemrev_v2di", MEM)
++BU_VSX_X (ST_ELEMREV_V4SF, "st_elemrev_v4sf", MEM)
++BU_VSX_X (ST_ELEMREV_V4SI, "st_elemrev_v4si", MEM)
++BU_VSX_X (ST_ELEMREV_V8HI, "st_elemrev_v8hi", MEM)
++BU_VSX_X (ST_ELEMREV_V16QI, "st_elemrev_v16qi", MEM)
BU_VSX_X (XSABSDP, "xsabsdp", CONST)
BU_VSX_X (XSADDDP, "xsadddp", FP)
BU_VSX_X (XSCMPODP, "xscmpodp", FP)
+@@ -1455,6 +1612,8 @@
+ /* VSX builtins that are handled as special cases. */
+ BU_VSX_OVERLOAD_X (LD, "ld")
+ BU_VSX_OVERLOAD_X (ST, "st")
++BU_VSX_OVERLOAD_X (XL, "xl")
++BU_VSX_OVERLOAD_X (XST, "xst")
+
+ /* 1 argument VSX instructions added in ISA 2.07. */
+ BU_P8V_VSX_1 (XSCVSPDPN, "xscvspdpn", CONST, vsx_xscvspdpn)
+@@ -1639,6 +1798,11 @@
+ BU_DFP_MISC_2 (PACK_TD, "pack_dec128", CONST, packtd)
+ BU_DFP_MISC_2 (UNPACK_TD, "unpack_dec128", CONST, unpacktd)
+
++/* 0 argument general-purpose register functions added in ISA 3.0 (power9). */
++BU_P9_MISC_0 (DARN_32, "darn_32", MISC, darn_32)
++BU_P9_64BIT_MISC_0 (DARN_RAW, "darn_raw", MISC, darn_raw)
++BU_P9_64BIT_MISC_0 (DARN, "darn", MISC, darn)
++
+ BU_LDBL128_2 (PACK_TF, "pack_longdouble", CONST, packtf)
+ BU_LDBL128_2 (UNPACK_TF, "unpack_longdouble", CONST, unpacktf)
+
+@@ -1645,6 +1809,26 @@
+ BU_P7_MISC_2 (PACK_V1TI, "pack_vector_int128", CONST, packv1ti)
+ BU_P7_MISC_2 (UNPACK_V1TI, "unpack_vector_int128", CONST, unpackv1ti)
+
++/* 1 argument vector functions added in ISA 3.0 (power9). */
++BU_P9V_AV_1 (VCTZB, "vctzb", CONST, ctzv16qi2)
++BU_P9V_AV_1 (VCTZH, "vctzh", CONST, ctzv8hi2)
++BU_P9V_AV_1 (VCTZW, "vctzw", CONST, ctzv4si2)
++BU_P9V_AV_1 (VCTZD, "vctzd", CONST, ctzv2di2)
++BU_P9V_AV_1 (VPRTYBD, "vprtybd", CONST, parityv2di2)
++BU_P9V_AV_1 (VPRTYBQ, "vprtybq", CONST, parityv1ti2)
++BU_P9V_AV_1 (VPRTYBW, "vprtybw", CONST, parityv4si2)
++
++/* ISA 3.0 vector overloaded 1 argument functions. */
++BU_P9V_OVERLOAD_1 (VCTZ, "vctz")
++BU_P9V_OVERLOAD_1 (VCTZB, "vctzb")
++BU_P9V_OVERLOAD_1 (VCTZH, "vctzh")
++BU_P9V_OVERLOAD_1 (VCTZW, "vctzw")
++BU_P9V_OVERLOAD_1 (VCTZD, "vctzd")
++BU_P9V_OVERLOAD_1 (VPRTYB, "vprtyb")
++BU_P9V_OVERLOAD_1 (VPRTYBD, "vprtybd")
++BU_P9V_OVERLOAD_1 (VPRTYBQ, "vprtybq")
++BU_P9V_OVERLOAD_1 (VPRTYBW, "vprtybw")
++
+
+ /* 1 argument crypto functions. */
+ BU_CRYPTO_1 (VSBOX, "vsbox", CONST, crypto_vsbox)
+Index: gcc/config/rs6000/rs6000-c.c
+===================================================================
+--- a/src/gcc/config/rs6000/rs6000-c.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/rs6000-c.c (.../branches/gcc-6-branch)
+@@ -2726,6 +2726,49 @@
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DF,
++ RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DF,
++ RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
++ RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
++ RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V2DI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_long_long, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SF,
++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SF,
++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V16QI, 0 },
++ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+@@ -3475,6 +3518,55 @@
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DF,
++ RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DF,
++ RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
++ RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
++ RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_long_long },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V2DI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_long_long },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SF,
++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SF,
++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V4SI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_UINTSI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V8HI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_UINTHI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_unsigned_V16QI },
++ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
++ ~RS6000_BTI_UINTQI },
+ { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI,
+@@ -4123,6 +4215,43 @@
+ { P8V_BUILTIN_VEC_VCLZD, P8V_BUILTIN_VCLZD,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
+
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZB,
++ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZB,
++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZH,
++ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZH,
++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZW,
++ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZW,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZD,
++ RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZD,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VCTZB, P9V_BUILTIN_VCTZB,
++ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZB, P9V_BUILTIN_VCTZB,
++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VCTZH, P9V_BUILTIN_VCTZH,
++ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZH, P9V_BUILTIN_VCTZH,
++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VCTZW, P9V_BUILTIN_VCTZW,
++ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZW, P9V_BUILTIN_VCTZW,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VCTZD, P9V_BUILTIN_VCTZD,
++ RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
++ { P9V_BUILTIN_VEC_VCTZD, P9V_BUILTIN_VCTZD,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
++
+ { P8V_BUILTIN_VEC_VGBBD, P8V_BUILTIN_VGBBD,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { P8V_BUILTIN_VEC_VGBBD, P8V_BUILTIN_VGBBD,
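The block above wires the overloaded P9V_BUILTIN_VEC_VCTZ entry point to the per-width VCTZB/VCTZH/VCTZW/VCTZD builtins, i.e. an ISA 3.0 vector count-trailing-zeros working element by element on signed and unsigned bytes, halfwords, words and doublewords. As a reference model only (plain C, no GCC intrinsics, and assuming the usual convention that an all-zero element yields the element width):

    /* Scalar reference model of a per-element count-trailing-zeros,
       shown for one 4 x 32-bit vector.  An all-zero element is assumed
       to yield the element width, 32.  */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    ctz32 (uint32_t x)
    {
      if (x == 0)
        return 32;
      uint32_t n = 0;
      while ((x & 1) == 0)
        {
          x >>= 1;
          n++;
        }
      return n;
    }

    int
    main (void)
    {
      uint32_t v[4] = { 0x8, 0x1, 0x0, 0xf0 };
      for (int i = 0; i < 4; i++)
        printf ("ctz (v[%d]) = %u\n", i, ctz32 (v[i]));
      /* Expected: 3, 0, 32, 4.  */
      return 0;
    }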
+@@ -4252,6 +4381,42 @@
+ { P8V_BUILTIN_VEC_VPOPCNTD, P8V_BUILTIN_VPOPCNTD,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
+
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBW,
++ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBW,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBD,
++ RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBD,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_INTTI, RS6000_BTI_INTTI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_UINTTI, RS6000_BTI_UINTTI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VPRTYBW, P9V_BUILTIN_VPRTYBW,
++ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYBW, P9V_BUILTIN_VPRTYBW,
++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VPRTYBD, P9V_BUILTIN_VPRTYBD,
++ RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYBD, P9V_BUILTIN_VPRTYBD,
++ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
++
++ { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_INTTI, RS6000_BTI_INTTI, 0, 0 },
++ { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ,
++ RS6000_BTI_UINTTI, RS6000_BTI_UINTTI, 0, 0 },
++
+ { P8V_BUILTIN_VEC_VPKUDUM, P8V_BUILTIN_VPKUDUM,
+ RS6000_BTI_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { P8V_BUILTIN_VEC_VPKUDUM, P8V_BUILTIN_VPKUDUM,
+Index: gcc/config/rs6000/rs6000.opt
+===================================================================
+--- a/src/gcc/config/rs6000/rs6000.opt (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/rs6000.opt (.../branches/gcc-6-branch)
+@@ -470,8 +470,8 @@
+ -mlong-double-<n> Specify size of long double (64 or 128 bits).
+
+ mlra
+-Target Report Var(rs6000_lra_flag) Init(0) Save
+-Use LRA instead of reload.
++Target Report Mask(LRA) Var(rs6000_isa_flags)
++Enable Local Register Allocation.
+
+ msched-costly-dep=
+ Target RejectNegative Joined Var(rs6000_sched_costly_dep_str)
+@@ -609,9 +609,17 @@
+ Target Report Mask(P9_VECTOR) Var(rs6000_isa_flags)
+ Use/do not use vector and scalar instructions added in ISA 3.0.
+
++mpower9-dform-scalar
++Target Undocumented Mask(P9_DFORM_SCALAR) Var(rs6000_isa_flags)
++Use/do not use scalar register+offset memory instructions added in ISA 3.0.
++
++mpower9-dform-vector
++Target Undocumented Mask(P9_DFORM_VECTOR) Var(rs6000_isa_flags)
++Use/do not use vector register+offset memory instructions added in ISA 3.0.
++
+ mpower9-dform
+-Target Undocumented Mask(P9_DFORM) Var(rs6000_isa_flags)
+-Use/do not use vector and scalar instructions added in ISA 3.0.
++Target Report Var(TARGET_P9_DFORM_BOTH) Init(-1) Save
++Use/do not use register+offset memory instructions added in ISA 3.0.
+
+ mpower9-minmax
+ Target Undocumented Mask(P9_MINMAX) Var(rs6000_isa_flags)
Index: gcc/config/rs6000/rs6000.c
===================================================================
--- a/src/gcc/config/rs6000/rs6000.c (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/config/rs6000/rs6000.c (.../branches/gcc-6-branch)
-@@ -27724,6 +27724,11 @@
+@@ -452,6 +452,7 @@
+ #define RELOAD_REG_PRE_INCDEC 0x10 /* PRE_INC/PRE_DEC valid. */
+ #define RELOAD_REG_PRE_MODIFY 0x20 /* PRE_MODIFY valid. */
+ #define RELOAD_REG_AND_M16 0x40 /* AND -16 addressing. */
++#define RELOAD_REG_QUAD_OFFSET 0x80 /* quad offset is limited. */
+
+ /* Register type masks based on the type, of valid addressing modes. */
+ struct rs6000_reg_addr {
+@@ -499,6 +500,16 @@
+ return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
+ }
+
++/* Return true if we have D-form addressing in VSX registers. This addressing
++ is more limited than normal d-form addressing in that the offset must be
++ aligned on a 16-byte boundary. */
++static inline bool
++mode_supports_vsx_dform_quad (machine_mode mode)
++{
++ return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
++ != 0);
++}
++
+
+ /* Target cpu costs. */
+
+@@ -1128,6 +1139,7 @@
+
+
+ /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -1140,6 +1152,9 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
++ { NAME, ICODE, MASK, ATTR },
++
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
+ { NAME, ICODE, MASK, ATTR },
+
+@@ -1185,6 +1200,7 @@
+ #include "rs6000-builtin.def"
+ };
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -2105,7 +2121,9 @@
+ else if (keep_spaces)
+ *p++ = ' ';
+
+- if ((mask & RELOAD_REG_OFFSET) != 0)
++ if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
++ *p++ = 'O';
++ else if ((mask & RELOAD_REG_OFFSET) != 0)
+ *p++ = 'o';
+ else if (keep_spaces)
+ *p++ = ' ';
+@@ -2642,8 +2660,7 @@
+ if (TARGET_LINK_STACK)
+ fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
+
+- if (targetm.lra_p ())
+- fprintf (stderr, DEBUG_FMT_S, "lra", "true");
++ fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
+
+ if (TARGET_P8_FUSION)
+ {
+@@ -2769,17 +2786,31 @@
+ }
+
+ /* GPR and FPR registers can do REG+OFFSET addressing, except
+- possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form
+- addressing for scalars to altivec registers. */
++ possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
++ for 64-bit scalars and 32-bit SFmode to altivec registers. */
+ if ((addr_mask != 0) && !indexed_only_p
+ && msize <= 8
+ && (rc == RELOAD_REG_GPR
+- || rc == RELOAD_REG_FPR
+- || (rc == RELOAD_REG_VMX
+- && TARGET_P9_DFORM
+- && (m2 == DFmode || m2 == SFmode))))
++ || ((msize == 8 || m2 == SFmode)
++ && (rc == RELOAD_REG_FPR
++ || (rc == RELOAD_REG_VMX
++ && TARGET_P9_DFORM_SCALAR)))))
+ addr_mask |= RELOAD_REG_OFFSET;
+
++      /* VSX registers can do REG+OFFSET addressing if ISA 3.0
++ instructions are enabled. The offset for 128-bit VSX registers is
++ only 12-bits. While GPRs can handle the full offset range, VSX
++ registers can only handle the restricted range. */
++ else if ((addr_mask != 0) && !indexed_only_p
++ && msize == 16 && TARGET_P9_DFORM_VECTOR
++ && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
++ || (m2 == TImode && TARGET_VSX_TIMODE)))
++ {
++ addr_mask |= RELOAD_REG_OFFSET;
++ if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
++ addr_mask |= RELOAD_REG_QUAD_OFFSET;
++ }
++
+ /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
+ addressing on 128-bit types. */
+ if (rc == RELOAD_REG_VMX && msize == 16
+@@ -3102,7 +3133,7 @@
+ }
+
+ /* Support for new D-form instructions. */
+- if (TARGET_P9_DFORM)
++ if (TARGET_P9_DFORM_SCALAR)
+ rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
+
+ /* Support for ISA 3.0 (power9) vectors. */
+@@ -3621,6 +3652,9 @@
+ | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
+ | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
+ | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
++ | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
++ | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
++ | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
+ | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
+ | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
+ | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
+@@ -3974,7 +4008,8 @@
+
+ /* For the newer switches (vsx, dfp, etc.) set some of the older options,
+ unless the user explicitly used the -mno-<option> to disable the code. */
+- if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM || TARGET_P9_MINMAX)
++ if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
++ || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
+ rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
+ else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
+ rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
+@@ -4188,26 +4223,49 @@
+ && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
+ rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
+
++ /* -mpower9-dform turns on both -mpower9-dform-scalar and
++ -mpower9-dform-vector. There are currently problems if
++ -mpower9-dform-vector instructions are enabled when we use the RELOAD
++ register allocator. */
++ if (TARGET_P9_DFORM_BOTH > 0)
++ {
++ if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR)
++ && TARGET_LRA)
++ rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
++
++ if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
++ rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
++ }
++ else if (TARGET_P9_DFORM_BOTH == 0)
++ {
++ if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
++ rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
++
++ if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
++ rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
++ }
++
+ /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
+- if (TARGET_P9_DFORM && !TARGET_P9_VECTOR)
++ if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
+ {
+ if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
+ error ("-mpower9-dform requires -mpower9-vector");
+- rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM;
++ rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
++ | OPTION_MASK_P9_DFORM_VECTOR);
+ }
+
+- if (TARGET_P9_DFORM && !TARGET_UPPER_REGS_DF)
++ if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
+ {
+ if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
+ error ("-mpower9-dform requires -mupper-regs-df");
+- rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM;
++ rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
+ }
+
+- if (TARGET_P9_DFORM && !TARGET_UPPER_REGS_SF)
++ if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
+ {
+ if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
+ error ("-mpower9-dform requires -mupper-regs-sf");
+- rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM;
++ rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
+ }
+
+ /* ISA 3.0 vector instructions include ISA 2.07. */
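The new option handling above treats -mpower9-dform as a three-state umbrella: TARGET_P9_DFORM_BOTH is 1 when -mpower9-dform was given, 0 for -mno-power9-dform, and -1 (the Init value) when unspecified. It fans out to the separate scalar and vector masks, and the vector form is only switched on by the umbrella when LRA is in use. A minimal standalone restatement of that decision, using hypothetical plain-bool parameters in place of the real rs6000_isa_flags bookkeeping:

    #include <stdbool.h>
    #include <stdio.h>

    struct dform_state { bool scalar; bool vector; };

    /* BOTH: 1 = -mpower9-dform, 0 = -mno-power9-dform, -1 = unset.
       Explicit sub-options always win over the umbrella option.  */
    static struct dform_state
    resolve_dform (int both, bool lra,
                   bool scalar_explicit, bool scalar_value,
                   bool vector_explicit, bool vector_value)
    {
      struct dform_state s = { scalar_value, vector_value };

      if (both > 0)
        {
          if (!scalar_explicit)
            s.scalar = true;
          if (!vector_explicit && lra)  /* vector d-form needs LRA.  */
            s.vector = true;
        }
      else if (both == 0)
        {
          if (!scalar_explicit)
            s.scalar = false;
          if (!vector_explicit)
            s.vector = false;
        }

      return s;
    }

    int
    main (void)
    {
      /* -mpower9-dform without LRA: scalar comes on, vector stays off.  */
      struct dform_state s
        = resolve_dform (1, false, false, false, false, false);
      printf ("scalar=%d vector=%d\n", s.scalar, s.vector);
      return 0;
    }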
+@@ -4218,6 +4276,47 @@
+ rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
+ }
+
++ /* There have been bugs with both -mvsx-timode and -mpower9-dform-vector that
++ don't show up with -mlra, but do show up with -mno-lra. Given -mlra will
++ become the default once PR 69847 is fixed, turn off the options with
++ problems by default if -mno-lra was used, and warn if the user explicitly
++ asked for the option.
++
++ Enable -mpower9-dform-vector by default if LRA and other power9 options.
++ Enable -mvsx-timode by default if LRA and VSX. */
++ if (!TARGET_LRA)
++ {
++ if (TARGET_VSX_TIMODE)
++ {
++ if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
++ warning (0, "-mvsx-timode might need -mlra");
++
++ else
++ rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
++ }
++
++ if (TARGET_P9_DFORM_VECTOR)
++ {
++ if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) != 0)
++ warning (0, "-mpower9-dform-vector might need -mlra");
++
++ else
++ rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
++ }
++ }
++
++ else
++ {
++ if (TARGET_VSX && !TARGET_VSX_TIMODE
++ && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
++ rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
++
++ if (TARGET_VSX && TARGET_P9_VECTOR && !TARGET_P9_DFORM_VECTOR
++ && TARGET_P9_DFORM_SCALAR && TARGET_P9_DFORM_BOTH < 0
++ && (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) == 0)
++ rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
++ }
++
+ /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
+ support. If we only have ISA 2.06 support, and the user did not specify
+ the switch, leave it set to -1 so the movmisalign patterns are enabled,
+@@ -6572,21 +6671,29 @@
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERM);
+- else
++ else
+ {
+- /* Invert selector. We prefer to generate VNAND on P8 so
+- that future fusion opportunities can kick in, but must
+- generate VNOR elsewhere. */
+- rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
+- rtx iorx = (TARGET_P8_VECTOR
+- ? gen_rtx_IOR (V16QImode, notx, notx)
+- : gen_rtx_AND (V16QImode, notx, notx));
+- rtx tmp = gen_reg_rtx (V16QImode);
+- emit_insn (gen_rtx_SET (tmp, iorx));
++ if (TARGET_P9_VECTOR)
++ x = gen_rtx_UNSPEC (mode,
++ gen_rtvec (3, target, reg,
++ force_reg (V16QImode, x)),
++ UNSPEC_VPERMR);
++ else
++ {
++ /* Invert selector. We prefer to generate VNAND on P8 so
++ that future fusion opportunities can kick in, but must
++ generate VNOR elsewhere. */
++ rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
++ rtx iorx = (TARGET_P8_VECTOR
++ ? gen_rtx_IOR (V16QImode, notx, notx)
++ : gen_rtx_AND (V16QImode, notx, notx));
++ rtx tmp = gen_reg_rtx (V16QImode);
++ emit_insn (gen_rtx_SET (tmp, iorx));
+
+- /* Permute with operands reversed and adjusted selector. */
+- x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
+- UNSPEC_VPERM);
++ /* Permute with operands reversed and adjusted selector. */
++ x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
++ UNSPEC_VPERM);
++ }
+ }
+
+ emit_insn (gen_rtx_SET (target, x));
+@@ -6902,6 +7009,59 @@
+ return false;
+ }
+
++/* Return true if the OFFSET is valid for the quad address instructions that
++ use d-form (register + offset) addressing. */
++
++static inline bool
++quad_address_offset_p (HOST_WIDE_INT offset)
++{
++ return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
++}
++
++/* Return true if the ADDR is an acceptable address for a quad memory
++ operation of mode MODE (either LQ/STQ for general purpose registers, or
++ LXV/STXV for vector registers under ISA 3.0. GPR_P is true if this address
++   LXV/STXV for vector registers under ISA 3.0).  GPR_P is true if this address
++ 3.0 LXV/STXV instruction. */
++
++bool
++quad_address_p (rtx addr, machine_mode mode, bool gpr_p)
++{
++ rtx op0, op1;
++
++ if (GET_MODE_SIZE (mode) != 16)
++ return false;
++
++ if (gpr_p)
++ {
++ if (!TARGET_QUAD_MEMORY && !TARGET_SYNC_TI)
++ return false;
++
++ /* LQ/STQ can handle indirect addresses. */
++ if (base_reg_operand (addr, Pmode))
++ return true;
++ }
++
++ else
++ {
++ if (!mode_supports_vsx_dform_quad (mode))
++ return false;
++ }
++
++ if (GET_CODE (addr) != PLUS)
++ return false;
++
++ op0 = XEXP (addr, 0);
++ if (!base_reg_operand (op0, Pmode))
++ return false;
++
++ op1 = XEXP (addr, 1);
++ if (!CONST_INT_P (op1))
++ return false;
++
++ return quad_address_offset_p (INTVAL (op1));
++}
++
+ /* Return true if this is a load or store quad operation. This function does
+ not handle the atomic quad memory instructions. */
+
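quad_address_offset_p above accepts exactly the displacements usable by the new ISA 3.0 quad-word d-form accesses: signed 16-bit values whose low four bits are clear, i.e. 16-byte-aligned offsets from -32768 up to 32752. A standalone restatement of the predicate for quick experimentation (illustrative only, independent of the GCC sources):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same test as quad_address_offset_p: fits in a signed 16-bit field
       and is a multiple of 16.  */
    static bool
    quad_offset_ok (long offset)
    {
      return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
    }

    int
    main (void)
    {
      long tests[] = { 0, 16, 32752, 32760, 32768, -32768, -16, 8 };
      for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
        printf ("%6ld -> %s\n", tests[i],
                quad_offset_ok (tests[i]) ? "ok" : "reject");
      /* 0, 16, 32752, -32768 and -16 pass; 32760, 32768 and 8 do not.  */
      return 0;
    }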
+@@ -6994,6 +7154,10 @@
+ if (TARGET_POWERPC64 && (offset & 3) != 0)
+ return false;
+
++ if (mode_supports_vsx_dform_quad (mode)
++ && !quad_address_offset_p (offset))
++ return false;
++
+ extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
+ if (extra < 0)
+ extra = 0;
+@@ -7023,13 +7187,14 @@
+ case TImode:
+ case TFmode:
+ case KFmode:
+- /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
+- TImode is not a vector mode, if we want to use the VSX registers to
+- move it around, we need to restrict ourselves to reg+reg addressing.
+- Similarly for IEEE 128-bit floating point that is passed in a single
+- vector register. */
++ /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
++ ISA 3.0 vector d-form addressing mode was added. While TImode is not
++ a vector mode, if we want to use the VSX registers to move it around,
++ we need to restrict ourselves to reg+reg addressing. Similarly for
++ IEEE 128-bit floating point that is passed in a single vector
++ register. */
+ if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
+- return false;
++ return mode_supports_vsx_dform_quad (mode);
+ break;
+
+ case V4HImode:
+@@ -7096,6 +7261,11 @@
+ if (GET_CODE (op) != SYMBOL_REF)
+ return false;
+
++ /* ISA 3.0 vector d-form addressing is restricted, don't allow
++ SYMBOL_REF. */
++ if (mode_supports_vsx_dform_quad (mode))
++ return false;
++
+ dsize = GET_MODE_SIZE (mode);
+ decl = SYMBOL_REF_DECL (op);
+ if (!decl)
+@@ -7250,6 +7420,9 @@
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
++ if (mode_supports_vsx_dform_quad (mode))
++ return (virtual_stack_registers_memory_p (x)
++ || quad_address_p (x, mode, false));
+ if (!reg_offset_addressing_ok_p (mode))
+ return virtual_stack_registers_memory_p (x);
+ if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
+@@ -7388,6 +7561,9 @@
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
++ /* quad word addresses are restricted, and we can't use LO_SUM. */
++ if (mode_supports_vsx_dform_quad (mode))
++ return false;
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return false;
+@@ -7399,7 +7575,7 @@
+
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ return false;
+- /* LRA don't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
++ /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
+ push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
+ recognizes some LO_SUM addresses as valid although this
+ function says opposite. In most cases, LRA through different
+@@ -7453,7 +7629,8 @@
+ {
+ unsigned int extra;
+
+- if (!reg_offset_addressing_ok_p (mode))
++ if (!reg_offset_addressing_ok_p (mode)
++ || mode_supports_vsx_dform_quad (mode))
+ {
+ if (virtual_stack_registers_memory_p (x))
+ return x;
+@@ -8164,6 +8341,11 @@
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
++ debug_rtx (x);
++ }
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+@@ -8175,6 +8357,11 @@
+ if (GET_CODE (x) == LO_SUM
+ && GET_CODE (XEXP (x, 0)) == HIGH)
+ {
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
++ debug_rtx (x);
++ }
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+@@ -8207,6 +8394,11 @@
+ {
+ rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
+ x = gen_rtx_LO_SUM (Pmode, hi, x);
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
++ debug_rtx (x);
++ }
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+@@ -8244,6 +8436,11 @@
+ GEN_INT (high)),
+ GEN_INT (low));
+
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
++ debug_rtx (x);
++ }
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+@@ -8304,6 +8501,11 @@
+ x = gen_rtx_LO_SUM (GET_MODE (x),
+ gen_rtx_HIGH (Pmode, x), x);
+
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
++ debug_rtx (x);
++ }
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+@@ -8337,9 +8539,16 @@
+ {
+ x = create_TOC_reference (x, NULL_RTX);
+ if (TARGET_CMODEL != CMODEL_SMALL)
+- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+- opnum, (enum reload_type) type);
++ {
++ if (TARGET_DEBUG_ADDR)
++ {
++ fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
++ debug_rtx (x);
++ }
++ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
++ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
++ opnum, (enum reload_type) type);
++ }
+ *win = 1;
+ return x;
+ }
+@@ -8395,6 +8604,7 @@
+ rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
+ {
+ bool reg_offset_p = reg_offset_addressing_ok_p (mode);
++ bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
+
+ /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
+ if (VECTOR_MEM_ALTIVEC_P (mode)
+@@ -8414,15 +8624,26 @@
+ return 1;
+ if (virtual_stack_registers_memory_p (x))
+ return 1;
+- if (reg_offset_p && legitimate_small_data_p (mode, x))
+- return 1;
+- if (reg_offset_p
+- && legitimate_constant_pool_address_p (x, mode,
++
++ /* Handle restricted vector d-form offsets in ISA 3.0. */
++ if (quad_offset_p)
++ {
++ if (quad_address_p (x, mode, false))
++ return 1;
++ }
++
++ else if (reg_offset_p)
++ {
++ if (legitimate_small_data_p (mode, x))
++ return 1;
++ if (legitimate_constant_pool_address_p (x, mode,
+ reg_ok_strict || lra_in_progress))
+- return 1;
+- if (reg_offset_p && reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
+- && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
+- return 1;
++ return 1;
++ if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
++ && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
++ return 1;
++ }
++
+ /* For TImode, if we have load/store quad and TImode in VSX registers, only
+ allow register indirect addresses. This will allow the values to go in
+ either GPRs or VSX registers without reloading. The vector types would
+@@ -8461,7 +8682,8 @@
+ && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ return 1;
+- if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
++ if (reg_offset_p && !quad_offset_p
++ && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
+ return 1;
+ return 0;
+ }
+@@ -12260,7 +12482,7 @@
+ /* const function, function only depends on the inputs. */
+ TREE_READONLY (t) = 1;
+ TREE_NOTHROW (t) = 1;
+- attr_string = ", pure";
++ attr_string = ", const";
+ }
+ else if ((classify & RS6000_BTC_PURE) != 0)
+ {
+@@ -12268,7 +12490,7 @@
+ external state. */
+ DECL_PURE_P (t) = 1;
+ TREE_NOTHROW (t) = 1;
+- attr_string = ", const";
++ attr_string = ", pure";
+ }
+ else if ((classify & RS6000_BTC_FP) != 0)
+ {
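The two hunks above swap the debug strings so the RS6000_BTC_CONST case now reports ", const" (it sets TREE_READONLY) and the RS6000_BTC_PURE case reports ", pure" (it sets DECL_PURE_P), matching the flags actually applied. In GCC attribute terms a const function may use only its arguments, while a pure function may additionally read, but never write, global memory; a short illustration of that distinction:

    #include <stdio.h>

    int factor = 3;

    /* Depends only on its argument: safe to mark const.  */
    __attribute__ ((const)) static int
    square (int x)
    {
      return x * x;
    }

    /* Also reads a global, so it can be pure but not const.  */
    __attribute__ ((pure)) static int
    scaled (int x)
    {
      return x * factor;
    }

    int
    main (void)
    {
      /* With these attributes the compiler may CSE repeated calls.  */
      printf ("%d\n", square (4) + square (4) + scaled (4));  /* 44 */
      return 0;
    }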
+@@ -12300,6 +12522,7 @@
+
+ /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12312,6 +12535,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
+@@ -12333,6 +12557,7 @@
+
+ /* DST operations: void foo (void *, const int, const char). */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12345,6 +12570,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12366,6 +12592,7 @@
+
+ /* Simple binary operations: VECc = foo (VECa, VECb). */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12378,6 +12605,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
+ { MASK, ICODE, NAME, ENUM },
+@@ -12397,6 +12625,7 @@
+ #include "rs6000-builtin.def"
+ };
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12409,6 +12638,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12431,6 +12661,7 @@
+ };
+
+ /* SPE predicates. */
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12443,6 +12674,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12463,6 +12695,7 @@
+ };
+
+ /* SPE evsel predicates. */
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12475,6 +12708,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12495,6 +12729,7 @@
+ };
+
+ /* PAIRED predicates. */
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12507,6 +12742,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12528,6 +12764,7 @@
+
+ /* ABS* operations. */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12540,6 +12777,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12562,6 +12800,7 @@
+ /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
+ foo (VECa). */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -12574,6 +12813,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
+ { MASK, ICODE, NAME, ENUM },
+
+@@ -12593,7 +12833,43 @@
+ #include "rs6000-builtin.def"
+ };
+
++/* Simple no-argument operations: result = __builtin_darn_32 () */
++
++#undef RS6000_BUILTIN_0
++#undef RS6000_BUILTIN_1
++#undef RS6000_BUILTIN_2
++#undef RS6000_BUILTIN_3
++#undef RS6000_BUILTIN_A
++#undef RS6000_BUILTIN_D
++#undef RS6000_BUILTIN_E
++#undef RS6000_BUILTIN_H
++#undef RS6000_BUILTIN_P
++#undef RS6000_BUILTIN_Q
++#undef RS6000_BUILTIN_S
++#undef RS6000_BUILTIN_X
++
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
++ { MASK, ICODE, NAME, ENUM },
++
++#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
++#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
++
++static const struct builtin_description bdesc_0arg[] =
++{
++#include "rs6000-builtin.def"
++};
++
+ /* HTM builtins. */
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
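The new bdesc_0arg table reuses the X-macro idiom of the surrounding tables: every RS6000_BUILTIN_n macro is #undef'd, all but the arity being collected are redefined to expand to nothing, and rs6000-builtin.def is included once more so that only the matching entries populate the array. A self-contained sketch of the idiom, with a made-up two-entry .def inlined as a macro since the real file is not part of this patch:

    #include <stdio.h>

    /* Stand-in for rs6000-builtin.def: one nullary and one unary entry.  */
    #define BUILTIN_DEFS            \
      BUILTIN_0 (DARN32, "darn_32") \
      BUILTIN_1 (ABSV4SI, "abs_v4si")

    struct desc { const char *name; };

    /* Collect only the zero-argument builtins.  */
    #define BUILTIN_0(ENUM, NAME) { NAME },
    #define BUILTIN_1(ENUM, NAME)
    static const struct desc bdesc_0arg[] = { BUILTIN_DEFS };
    #undef BUILTIN_0
    #undef BUILTIN_1

    /* Collect only the one-argument builtins.  */
    #define BUILTIN_0(ENUM, NAME)
    #define BUILTIN_1(ENUM, NAME) { NAME },
    static const struct desc bdesc_1arg[] = { BUILTIN_DEFS };
    #undef BUILTIN_0
    #undef BUILTIN_1

    int
    main (void)
    {
      printf ("0-arg: %s\n", bdesc_0arg[0].name);
      printf ("1-arg: %s\n", bdesc_1arg[0].name);
      return 0;
    }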
+@@ -12606,6 +12882,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
+@@ -12625,6 +12902,7 @@
+ #include "rs6000-builtin.def"
+ };
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -14129,6 +14407,47 @@
+ case VSX_BUILTIN_STXVW4X_V16QI:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
+
++ /* For the following on big endian, it's ok to use any appropriate
++ unaligned-supporting store, so use a generic expander. For
++ little-endian, the exact element-reversing instruction must
++ be used. */
++ case VSX_BUILTIN_ST_ELEMREV_V2DF:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
++ : CODE_FOR_vsx_st_elemrev_v2df);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++ case VSX_BUILTIN_ST_ELEMREV_V2DI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
++ : CODE_FOR_vsx_st_elemrev_v2di);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++ case VSX_BUILTIN_ST_ELEMREV_V4SF:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
++ : CODE_FOR_vsx_st_elemrev_v4sf);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++ case VSX_BUILTIN_ST_ELEMREV_V4SI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
++ : CODE_FOR_vsx_st_elemrev_v4si);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++ case VSX_BUILTIN_ST_ELEMREV_V8HI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
++ : CODE_FOR_vsx_st_elemrev_v8hi);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++ case VSX_BUILTIN_ST_ELEMREV_V16QI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
++ : CODE_FOR_vsx_st_elemrev_v16qi);
++ return altivec_expand_stv_builtin (code, exp);
++ }
++
+ case ALTIVEC_BUILTIN_MFVSCR:
+ icode = CODE_FOR_altivec_mfvscr;
+ tmode = insn_data[icode].operand[0].mode;
+@@ -14323,6 +14642,46 @@
+ case VSX_BUILTIN_LXVW4X_V16QI:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
+ exp, target, false);
++ /* For the following on big endian, it's ok to use any appropriate
++ unaligned-supporting load, so use a generic expander. For
++ little-endian, the exact element-reversing instruction must
++ be used. */
++ case VSX_BUILTIN_LD_ELEMREV_V2DF:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
++ : CODE_FOR_vsx_ld_elemrev_v2df);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
++ case VSX_BUILTIN_LD_ELEMREV_V2DI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
++ : CODE_FOR_vsx_ld_elemrev_v2di);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
++ case VSX_BUILTIN_LD_ELEMREV_V4SF:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
++ : CODE_FOR_vsx_ld_elemrev_v4sf);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
++ case VSX_BUILTIN_LD_ELEMREV_V4SI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
++ : CODE_FOR_vsx_ld_elemrev_v4si);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
++ case VSX_BUILTIN_LD_ELEMREV_V8HI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
++ : CODE_FOR_vsx_ld_elemrev_v8hi);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
++ case VSX_BUILTIN_LD_ELEMREV_V16QI:
++ {
++ enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
++ : CODE_FOR_vsx_ld_elemrev_v16qi);
++ return altivec_expand_lv_builtin (code, exp, target, false);
++ }
+ break;
+ default:
+ break;
+@@ -14792,6 +15151,8 @@
+ error ("Builtin function %s requires the -mhard-dfp option", name);
+ else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
+ error ("Builtin function %s requires the -mpower8-vector option", name);
++ else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
++ error ("Builtin function %s requires the -mpower9-vector option", name);
+ else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
+ == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
+ error ("Builtin function %s requires the -mhard-float and"
+@@ -14990,9 +15351,11 @@
+ }
+
+ unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
++ /* RS6000_BTC_SPECIAL represents no-operand operators. */
+ gcc_assert (attr == RS6000_BTC_UNARY
+ || attr == RS6000_BTC_BINARY
+- || attr == RS6000_BTC_TERNARY);
++ || attr == RS6000_BTC_TERNARY
++ || attr == RS6000_BTC_SPECIAL);
+
+ /* Handle simple unary operations. */
+ d = bdesc_1arg;
+@@ -15012,6 +15375,12 @@
+ if (d->code == fcode)
+ return rs6000_expand_ternop_builtin (d->icode, exp, target);
+
++ /* Handle simple no-argument operations. */
++ d = bdesc_0arg;
++ for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
++ if (d->code == fcode)
++ return rs6000_expand_zeroop_builtin (d->icode, target);
++
+ gcc_unreachable ();
+ }
+
+@@ -15816,10 +16185,44 @@
+ VSX_BUILTIN_STXVW4X_V8HI);
+ def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
+ VSX_BUILTIN_STXVW4X_V16QI);
++
++ def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V2DF);
++ def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V2DI);
++ def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V4SF);
++ def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V4SI);
++ def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
++ VSX_BUILTIN_ST_ELEMREV_V2DF);
++ def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
++ VSX_BUILTIN_ST_ELEMREV_V2DI);
++ def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
++ VSX_BUILTIN_ST_ELEMREV_V4SF);
++ def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
++ VSX_BUILTIN_ST_ELEMREV_V4SI);
++
++ if (TARGET_P9_VECTOR)
++ {
++ def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V8HI);
++ def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
++ VSX_BUILTIN_LD_ELEMREV_V16QI);
++ def_builtin ("__builtin_vsx_st_elemrev_v8hi",
++ void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
++ def_builtin ("__builtin_vsx_st_elemrev_v16qi",
++ void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
++ }
++
+ def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
+ VSX_BUILTIN_VEC_LD);
+ def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
+ VSX_BUILTIN_VEC_ST);
++ def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
++ VSX_BUILTIN_VEC_XL);
++ def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
++ VSX_BUILTIN_VEC_XST);
+
+ def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
+ def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
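The def_builtin calls above make the element-reversing loads and stores available as GCC builtins with the long-plus-pointer signatures implied by their type nodes (v4si_ftype_long_pcvoid, void_ftype_v4si_long_pvoid, and so on). A hedged usage sketch, assuming a VSX-enabled powerpc compiler carrying this patch, that altivec.h supplies the vector types, and that the long argument is a byte offset added to the pointer as with the existing __builtin_vsx_* load/store builtins:

    /* Sketch only; compile for powerpc with VSX enabled (e.g. -mvsx).  */
    #include <altivec.h>

    vector signed int
    load_reversed (const signed int *p)
    {
      /* (offset, pointer), per v4si_ftype_long_pcvoid.  */
      return __builtin_vsx_ld_elemrev_v4si (0, p);
    }

    void
    store_reversed (vector signed int v, signed int *p)
    {
      /* (value, offset, pointer), per void_ftype_v4si_long_pvoid.  */
      __builtin_vsx_st_elemrev_v4si (v, 0, p);
    }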
+@@ -16351,10 +16754,6 @@
+ while (num_args > 0 && h.mode[num_args] == VOIDmode)
+ num_args--;
+
+- if (num_args == 0)
+- fatal_error (input_location,
+- "internal error: builtin function %s had no type", name);
+-
+ ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
+ if (!ret_type && h.uns_p[0])
+ ret_type = builtin_mode_to_type[h.mode[0]][0];
+@@ -16406,6 +16805,7 @@
+ tree opaque_ftype_opaque = NULL_TREE;
+ tree opaque_ftype_opaque_opaque = NULL_TREE;
+ tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
++ tree v2si_ftype = NULL_TREE;
+ tree v2si_ftype_qi = NULL_TREE;
+ tree v2si_ftype_v2si_qi = NULL_TREE;
+ tree v2si_ftype_int_qi = NULL_TREE;
+@@ -16622,6 +17022,64 @@
+
+ def_builtin (d->name, type, d->code);
+ }
++
++ /* Add the simple no-argument operators. */
++ d = bdesc_0arg;
++ for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
++ {
++ machine_mode mode0;
++ tree type;
++ HOST_WIDE_INT mask = d->mask;
++
++ if ((mask & builtin_mask) != mask)
++ {
++ if (TARGET_DEBUG_BUILTIN)
++ fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
++ continue;
++ }
++ if (rs6000_overloaded_builtin_p (d->code))
++ {
++ if (!opaque_ftype_opaque)
++ opaque_ftype_opaque
++ = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
++ type = opaque_ftype_opaque;
++ }
++ else
++ {
++ enum insn_code icode = d->icode;
++ if (d->name == 0)
++ {
++ if (TARGET_DEBUG_BUILTIN)
++ fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
++ (long unsigned) i);
++ continue;
++ }
++ if (icode == CODE_FOR_nothing)
++ {
++ if (TARGET_DEBUG_BUILTIN)
++ fprintf (stderr,
++ "rs6000_builtin, skip no-argument %s (no code)\n",
++ d->name);
++ continue;
++ }
++ mode0 = insn_data[icode].operand[0].mode;
++ if (mode0 == V2SImode)
++ {
++ /* code for SPE */
++ if (! (type = v2si_ftype))
++ {
++ v2si_ftype
++ = build_function_type_list (opaque_V2SI_type_node,
++ NULL_TREE);
++ type = v2si_ftype;
++ }
++ }
++ else
++ type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
++ d->code, d->name);
++ }
++ def_builtin (d->name, type, d->code);
++ }
+ }
+
+ /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
+@@ -18152,6 +18610,16 @@
+ }
+ }
+
++ else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
++ && CONST_INT_P (plus_arg1))
++ {
++ if (!quad_address_offset_p (INTVAL (plus_arg1)))
++ {
++ extra_cost = 1;
++ type = "vector d-form offset";
++ }
++ }
++
+ /* Make sure the register class can handle offset addresses. */
+ else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
+ {
+@@ -18158,7 +18626,7 @@
+ if ((addr_mask & RELOAD_REG_OFFSET) == 0)
+ {
+ extra_cost = 1;
+- type = "offset";
++ type = "offset #2";
+ }
+ }
+
+@@ -18171,8 +18639,15 @@
+ break;
+
+ case LO_SUM:
+- if (!legitimate_lo_sum_address_p (mode, addr, false))
++ /* Quad offsets are restricted and can't handle normal addresses. */
++ if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
+ {
++ extra_cost = -1;
++ type = "vector d-form lo_sum";
++ }
++
++ else if (!legitimate_lo_sum_address_p (mode, addr, false))
++ {
+ fail_msg = "bad LO_SUM";
+ extra_cost = -1;
+ }
+@@ -18188,8 +18663,17 @@
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+- type = "address";
+- extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
++ if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
++ {
++ extra_cost = -1;
++ type = "vector d-form lo_sum #2";
++ }
++
++ else
++ {
++ type = "address";
++ extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
++ }
+ break;
+
+ /* TOC references look like offsetable memory. */
+@@ -18200,6 +18684,12 @@
+ extra_cost = -1;
+ }
+
++ else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
++ {
++ extra_cost = -1;
++ type = "vector d-form lo_sum #3";
++ }
++
+ else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
+ {
+ extra_cost = 1;
+@@ -18827,6 +19317,16 @@
+ }
+ }
+
++ else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
++ {
++ if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
++ || !quad_address_p (addr, mode, false))
++ {
++ emit_insn (gen_rtx_SET (scratch, addr));
++ new_addr = scratch;
++ }
++ }
++
+ /* Make sure the register class can handle offset addresses. */
+ else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
+ {
+@@ -18857,6 +19357,13 @@
+ }
+ }
+
++ /* Quad offsets are restricted and can't handle normal addresses. */
++ else if (mode_supports_vsx_dform_quad (mode))
++ {
++ emit_insn (gen_rtx_SET (scratch, addr));
++ new_addr = scratch;
++ }
++
+ /* Make sure the register class can handle offset addresses. */
+ else if (legitimate_lo_sum_address_p (mode, addr, false))
+ {
+@@ -19067,7 +19574,8 @@
+ }
+
+ /* D-form addressing can easily reload the value. */
+- if (mode_supports_vmx_dform (mode))
++ if (mode_supports_vmx_dform (mode)
++ || mode_supports_vsx_dform_quad (mode))
+ return rclass;
+
+ /* If this is a scalar floating point value and we don't have D-form
+@@ -19483,8 +19991,16 @@
+
+ else if (TARGET_VSX && dest_vsx_p)
+ {
+- if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
++ if (mode_supports_vsx_dform_quad (mode)
++ && quad_address_p (XEXP (src, 0), mode, false))
++ return "lxv %x0,%1";
++
++ else if (TARGET_P9_VECTOR)
++ return "lxvx %x0,%y1";
++
++ else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
+ return "lxvw4x %x0,%y1";
++
+ else
+ return "lxvd2x %x0,%y1";
+ }
+@@ -19513,8 +20029,16 @@
+
+ else if (TARGET_VSX && src_vsx_p)
+ {
+- if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
++ if (mode_supports_vsx_dform_quad (mode)
++ && quad_address_p (XEXP (dest, 0), mode, false))
++ return "stxv %x1,%0";
++
++ else if (TARGET_P9_VECTOR)
++ return "stxvx %x1,%y0";
++
++ else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
+ return "stxvw4x %x1,%y0";
++
+ else
+ return "stxvd2x %x1,%y0";
+ }
+@@ -25949,7 +26473,7 @@
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx areg, savereg, mem;
+- int offset;
++ HOST_WIDE_INT offset;
+
+ offset = (info->altivec_save_offset + frame_off
+ + 16 * (i - info->first_altivec_reg_save));
+@@ -25956,18 +26480,30 @@
+
+ savereg = gen_rtx_REG (V4SImode, i);
+
+- NOT_INUSE (0);
+- areg = gen_rtx_REG (Pmode, 0);
+- emit_move_insn (areg, GEN_INT (offset));
++ if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
++ {
++ mem = gen_frame_mem (V4SImode,
++ gen_rtx_PLUS (Pmode, frame_reg_rtx,
++ GEN_INT (offset)));
++ insn = emit_insn (gen_rtx_SET (mem, savereg));
++ areg = NULL_RTX;
++ }
++ else
++ {
++ NOT_INUSE (0);
++ areg = gen_rtx_REG (Pmode, 0);
++ emit_move_insn (areg, GEN_INT (offset));
+
+- /* AltiVec addressing mode is [reg+reg]. */
+- mem = gen_frame_mem (V4SImode,
+- gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
++ /* AltiVec addressing mode is [reg+reg]. */
++ mem = gen_frame_mem (V4SImode,
++ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
+
+- /* Rather than emitting a generic move, force use of the stvx
+- instruction, which we always want. In particular we don't
+- want xxpermdi/stxvd2x for little endian. */
+- insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
++ /* Rather than emitting a generic move, force use of the stvx
++ instruction, which we always want on ISA 2.07 (power8) systems.
++ In particular we don't want xxpermdi/stxvd2x for little
++ endian. */
++ insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
++ }
+
+ rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
+ areg, GEN_INT (offset));
+@@ -26687,23 +27223,35 @@
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+- rtx addr, areg, mem, reg;
++ rtx addr, areg, mem, insn;
++ rtx reg = gen_rtx_REG (V4SImode, i);
++ HOST_WIDE_INT offset
++ = (info->altivec_save_offset + frame_off
++ + 16 * (i - info->first_altivec_reg_save));
+
+- areg = gen_rtx_REG (Pmode, 0);
+- emit_move_insn
+- (areg, GEN_INT (info->altivec_save_offset
+- + frame_off
+- + 16 * (i - info->first_altivec_reg_save)));
++ if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
++ {
++ mem = gen_frame_mem (V4SImode,
++ gen_rtx_PLUS (Pmode, frame_reg_rtx,
++ GEN_INT (offset)));
++ insn = gen_rtx_SET (reg, mem);
++ }
++ else
++ {
++ areg = gen_rtx_REG (Pmode, 0);
++ emit_move_insn (areg, GEN_INT (offset));
+
+- /* AltiVec addressing mode is [reg+reg]. */
+- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+- mem = gen_frame_mem (V4SImode, addr);
++ /* AltiVec addressing mode is [reg+reg]. */
++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
++ mem = gen_frame_mem (V4SImode, addr);
+
+- reg = gen_rtx_REG (V4SImode, i);
+- /* Rather than emitting a generic move, force use of the
+- lvx instruction, which we always want. In particular
+- we don't want lxvd2x/xxpermdi for little endian. */
+- (void) emit_insn (gen_altivec_lvx_v4si_internal (reg, mem));
++ /* Rather than emitting a generic move, force use of the
++ lvx instruction, which we always want. In particular we
++ don't want lxvd2x/xxpermdi for little endian. */
++ insn = gen_altivec_lvx_v4si_internal (reg, mem);
++ }
++
++ (void) emit_insn (insn);
+ }
+ }
+
+@@ -26890,23 +27438,35 @@
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+- rtx addr, areg, mem, reg;
++ rtx addr, areg, mem, insn;
++ rtx reg = gen_rtx_REG (V4SImode, i);
++ HOST_WIDE_INT offset
++ = (info->altivec_save_offset + frame_off
++ + 16 * (i - info->first_altivec_reg_save));
+
+- areg = gen_rtx_REG (Pmode, 0);
+- emit_move_insn
+- (areg, GEN_INT (info->altivec_save_offset
+- + frame_off
+- + 16 * (i - info->first_altivec_reg_save)));
++ if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
++ {
++ mem = gen_frame_mem (V4SImode,
++ gen_rtx_PLUS (Pmode, frame_reg_rtx,
++ GEN_INT (offset)));
++ insn = gen_rtx_SET (reg, mem);
++ }
++ else
++ {
++ areg = gen_rtx_REG (Pmode, 0);
++ emit_move_insn (areg, GEN_INT (offset));
+
+- /* AltiVec addressing mode is [reg+reg]. */
+- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+- mem = gen_frame_mem (V4SImode, addr);
++ /* AltiVec addressing mode is [reg+reg]. */
++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
++ mem = gen_frame_mem (V4SImode, addr);
+
+- reg = gen_rtx_REG (V4SImode, i);
+- /* Rather than emitting a generic move, force use of the
+- lvx instruction, which we always want. In particular
+- we don't want lxvd2x/xxpermdi for little endian. */
+- (void) emit_insn (gen_altivec_lvx_v4si_internal (reg, mem));
++ /* Rather than emitting a generic move, force use of the
++ lvx instruction, which we always want. In particular we
++ don't want lxvd2x/xxpermdi for little endian. */
++ insn = gen_altivec_lvx_v4si_internal (reg, mem);
++ }
++
++ (void) emit_insn (insn);
+ }
+ }
+
+@@ -27724,6 +28284,11 @@
const0_rtx, const0_rtx));
call_fusage = NULL_RTX;
use_reg (&call_fusage, r12);
@@ -793711,11 +799861,345 @@ Index: gcc/config/rs6000/rs6000.c
add_function_usage_to (insn, call_fusage);
emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
insn = emit_move_insn (lr, r0);
+@@ -33448,17 +34013,25 @@
+ if (!REG_P (target))
+ tmp = gen_reg_rtx (mode);
+
+- /* Invert the selector with a VNAND if available, else a VNOR.
+- The VNAND is preferred for future fusion opportunities. */
+- notx = gen_rtx_NOT (V16QImode, sel);
+- iorx = (TARGET_P8_VECTOR
+- ? gen_rtx_IOR (V16QImode, notx, notx)
+- : gen_rtx_AND (V16QImode, notx, notx));
+- emit_insn (gen_rtx_SET (norreg, iorx));
++ if (TARGET_P9_VECTOR)
++ {
++ unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
++ UNSPEC_VPERMR);
++ }
++ else
++ {
++ /* Invert the selector with a VNAND if available, else a VNOR.
++ The VNAND is preferred for future fusion opportunities. */
++ notx = gen_rtx_NOT (V16QImode, sel);
++ iorx = (TARGET_P8_VECTOR
++ ? gen_rtx_IOR (V16QImode, notx, notx)
++ : gen_rtx_AND (V16QImode, notx, notx));
++ emit_insn (gen_rtx_SET (norreg, iorx));
+
+- /* Permute with operands reversed and adjusted selector. */
+- unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
+- UNSPEC_VPERM);
++ /* Permute with operands reversed and adjusted selector. */
++ unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
++ UNSPEC_VPERM);
++ }
+
+ /* Copy into target, possibly by way of a register. */
+ if (!REG_P (target))
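This hunk gives the little-endian vector permute expansion a direct ISA 3.0 path (UNSPEC_VPERMR) and keeps the older fallback, which complements the selector (via a vnand on power8 or a vnor elsewhere, applied to the selector with itself) and swaps the two inputs before the ordinary vperm. The fallback works because vperm indexes bytes in big-endian order across the concatenated registers, so the lane reversal, the operand swap and the complemented index cancel out. A plain C model of that identity (hypothetical helper names, no intrinsics):

    #include <assert.h>
    #include <stdio.h>

    /* Hardware-style vperm: SEL holds big-endian byte indices into the
       32-byte concatenation of VA and VB (all in big-endian element
       order).  */
    static void
    vperm_be (const unsigned char *va, const unsigned char *vb,
              const unsigned char *sel, unsigned char *res)
    {
      for (int j = 0; j < 16; j++)
        {
          unsigned k = sel[j] & 31;
          res[j] = k < 16 ? va[k] : vb[k - 16];
        }
    }

    /* On little-endian, lane L of a GCC vector is big-endian element
       15 - L of the register.  */
    static void
    lanes_to_be (const unsigned char *lanes, unsigned char *be)
    {
      for (int l = 0; l < 16; l++)
        be[15 - l] = lanes[l];
    }

    int
    main (void)
    {
      unsigned char op0[16], op1[16], sel[16];
      for (int l = 0; l < 16; l++)
        {
          op0[l] = l;                    /* concat indices 0..15.   */
          op1[l] = 100 + l;              /* concat indices 16..31.  */
          sel[l] = (5 * l + 3) & 31;     /* arbitrary selector.     */
        }

      /* Desired semantics: res[l] = concat (op0, op1)[sel[l]].  */
      unsigned char want[16];
      for (int l = 0; l < 16; l++)
        want[l] = sel[l] < 16 ? op0[sel[l]] : op1[sel[l] - 16];

      /* Fallback: complement the selector and swap the operands.  */
      unsigned char a_be[16], b_be[16], s_be[16], r_be[16], notsel[16];
      for (int l = 0; l < 16; l++)
        notsel[l] = (unsigned char) ~sel[l];
      lanes_to_be (op1, a_be);           /* operands swapped.  */
      lanes_to_be (op0, b_be);
      lanes_to_be (notsel, s_be);
      vperm_be (a_be, b_be, s_be, r_be);

      for (int l = 0; l < 16; l++)
        assert (r_be[15 - l] == want[l]);

      printf ("inverted-selector, swapped-operand permute matches\n");
      return 0;
    }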
+@@ -34056,7 +34629,7 @@
+ static bool
+ rs6000_lra_p (void)
+ {
+- return rs6000_lra_flag;
++ return TARGET_LRA;
+ }
+
+ /* Given FROM and TO register numbers, say whether this elimination is allowed.
+@@ -34417,7 +34990,8 @@
+ { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
+ { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
+ { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
+- { "power9-dform", OPTION_MASK_P9_DFORM, false, true },
++ { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
++ { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
+ { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
+ { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
+ { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
+@@ -34474,6 +35048,7 @@
+ { "popcntd", RS6000_BTM_POPCNTD, false, false },
+ { "cell", RS6000_BTM_CELL, false, false },
+ { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
++ { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
+ { "crypto", RS6000_BTM_CRYPTO, false, false },
+ { "htm", RS6000_BTM_HTM, false, false },
+ { "hard-dfp", RS6000_BTM_DFP, false, false },
+@@ -35049,7 +35624,9 @@
+ size_t i;
+ size_t start_column = 0;
+ size_t cur_column;
+- size_t max_column = 76;
++ size_t max_column = 120;
++ size_t prefix_len = strlen (prefix);
++ size_t comma_len = 0;
+ const char *comma = "";
+
+ if (indent)
+@@ -35067,27 +35644,45 @@
+ cur_column = start_column;
+ for (i = 0; i < num_elements; i++)
+ {
+- if ((flags & opts[i].mask) != 0)
++ bool invert = opts[i].invert;
++ const char *name = opts[i].name;
++ const char *no_str = "";
++ HOST_WIDE_INT mask = opts[i].mask;
++ size_t len = comma_len + prefix_len + strlen (name);
++
++ if (!invert)
+ {
+- const char *no_str = rs6000_opt_masks[i].invert ? "no-" : "";
+- size_t len = (strlen (comma)
+- + strlen (prefix)
+- + strlen (no_str)
+- + strlen (rs6000_opt_masks[i].name));
++ if ((flags & mask) == 0)
++ {
++ no_str = "no-";
++ len += sizeof ("no-") - 1;
++ }
+
+- cur_column += len;
+- if (cur_column > max_column)
++ flags &= ~mask;
++ }
++
++ else
++ {
++ if ((flags & mask) != 0)
+ {
+- fprintf (stderr, ", \\\n%*s", (int)start_column, "");
+- cur_column = start_column + len;
+- comma = "";
++ no_str = "no-";
++ len += sizeof ("no-") - 1;
+ }
+
+- fprintf (file, "%s%s%s%s", comma, prefix, no_str,
+- rs6000_opt_masks[i].name);
+- flags &= ~ opts[i].mask;
+- comma = ", ";
++ flags |= mask;
+ }
++
++ cur_column += len;
++ if (cur_column > max_column)
++ {
++ fprintf (stderr, ", \\\n%*s", (int)start_column, "");
++ cur_column = start_column + len;
++ comma = "";
++ }
++
++ fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
++ comma = ", ";
++ comma_len = sizeof (", ") - 1;
+ }
+
+ fputs ("\n", file);
Index: gcc/config/rs6000/vsx.md
===================================================================
--- a/src/gcc/config/rs6000/vsx.md (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/config/rs6000/vsx.md (.../branches/gcc-6-branch)
-@@ -1583,10 +1583,15 @@
+@@ -301,24 +301,6 @@
+ UNSPEC_VSX_XVCVDPUXDS
+ ])
+
+-;; VSX (P9) moves
+-
+-(define_insn "*p9_vecload_<mode>"
+- [(set (match_operand:VSX_M2 0 "vsx_register_operand" "=<VSa>")
+- (match_operand:VSX_M2 1 "memory_operand" "Z"))]
+- "TARGET_P9_VECTOR"
+- "lxvx %x0,%y1"
+- [(set_attr "type" "vecload")
+- (set_attr "length" "4")])
+-
+-(define_insn "*p9_vecstore_<mode>"
+- [(set (match_operand:VSX_M2 0 "memory_operand" "=Z")
+- (match_operand:VSX_M2 1 "vsx_register_operand" "<VSa>"))]
+- "TARGET_P9_VECTOR"
+- "stxvx %x1,%y0"
+- [(set_attr "type" "vecstore")
+- (set_attr "length" "4")])
+-
+ ;; VSX moves
+
+ ;; The patterns for LE permuted loads and stores come before the general
+@@ -788,8 +770,8 @@
+ "")
+
+ (define_insn "*vsx_mov<mode>"
+- [(set (match_operand:VSX_M 0 "nonimmediate_operand" "=Z,<VSr>,<VSr>,?Z,?<VSa>,?<VSa>,r,we,wQ,?&r,??Y,??r,??r,<VSr>,?<VSa>,*r,v,wZ,v")
+- (match_operand:VSX_M 1 "input_operand" "<VSr>,Z,<VSr>,<VSa>,Z,<VSa>,we,b,r,wQ,r,Y,r,j,j,j,W,v,wZ"))]
++ [(set (match_operand:VSX_M 0 "nonimmediate_operand" "=ZwO,<VSr>,<VSr>,?ZwO,?<VSa>,?<VSa>,r,we,wQ,?&r,??Y,??r,??r,<VSr>,?<VSa>,*r,v,wZ,v")
++ (match_operand:VSX_M 1 "input_operand" "<VSr>,ZwO,<VSr>,<VSa>,ZwO,<VSa>,we,b,r,wQ,r,Y,r,j,j,j,W,v,wZ"))]
+ "VECTOR_MEM_VSX_P (<MODE>mode)
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+@@ -803,8 +785,8 @@
+ ;; use of TImode is for unions. However for plain data movement, slightly
+ ;; favor the vector loads
+ (define_insn "*vsx_movti_64bit"
+- [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,wa,wa,wa,r,we,v,v,wZ,wQ,&r,Y,r,r,?r")
+- (match_operand:TI 1 "input_operand" "wa,Z,wa,O,we,b,W,wZ,v,r,wQ,r,Y,r,n"))]
++ [(set (match_operand:TI 0 "nonimmediate_operand" "=ZwO,wa,wa,wa,r,we,v,v,wZ,wQ,&r,Y,r,r,?r")
++ (match_operand:TI 1 "input_operand" "wa,ZwO,wa,O,we,b,W,wZ,v,r,wQ,r,Y,r,n"))]
+ "TARGET_POWERPC64 && VECTOR_MEM_VSX_P (TImode)
+ && (register_operand (operands[0], TImode)
+ || register_operand (operands[1], TImode))"
+@@ -815,8 +797,8 @@
+ (set_attr "length" "4,4,4,4,8,4,16,4,4,8,8,8,8,8,8")])
+
+ (define_insn "*vsx_movti_32bit"
+- [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,wa,wa,wa,v, v,wZ,Q,Y,????r,????r,????r,r")
+- (match_operand:TI 1 "input_operand" "wa, Z,wa, O,W,wZ, v,r,r, Q, Y, r,n"))]
++ [(set (match_operand:TI 0 "nonimmediate_operand" "=ZwO,wa,wa,wa,v,v,wZ,Q,Y,????r,????r,????r,r")
++ (match_operand:TI 1 "input_operand" "wa,ZwO,wa,O,W,wZ,v,r,r,Q,Y,r,n"))]
+ "! TARGET_POWERPC64 && VECTOR_MEM_VSX_P (TImode)
+ && (register_operand (operands[0], TImode)
+ || register_operand (operands[1], TImode))"
+@@ -887,6 +869,140 @@
+ "VECTOR_MEM_VSX_P (<MODE>mode)"
+ "")
+
++;; Explicit load/store expanders for the builtin functions for lxvd2x, etc.,
++;; when you really want their element-reversing behavior.
++(define_insn "vsx_ld_elemrev_v2di"
++ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
++ (vec_select:V2DI
++ (match_operand:V2DI 1 "memory_operand" "Z")
++ (parallel [(const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V2DImode) && !BYTES_BIG_ENDIAN"
++ "lxvd2x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_ld_elemrev_v2df"
++ [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
++ (vec_select:V2DF
++ (match_operand:V2DF 1 "memory_operand" "Z")
++ (parallel [(const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V2DFmode) && !BYTES_BIG_ENDIAN"
++ "lxvd2x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_ld_elemrev_v4si"
++ [(set (match_operand:V4SI 0 "vsx_register_operand" "=wa")
++ (vec_select:V4SI
++ (match_operand:V4SI 1 "memory_operand" "Z")
++ (parallel [(const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V4SImode) && !BYTES_BIG_ENDIAN"
++ "lxvw4x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_ld_elemrev_v4sf"
++ [(set (match_operand:V4SF 0 "vsx_register_operand" "=wa")
++ (vec_select:V4SF
++ (match_operand:V4SF 1 "memory_operand" "Z")
++ (parallel [(const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V4SFmode) && !BYTES_BIG_ENDIAN"
++ "lxvw4x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_ld_elemrev_v8hi"
++ [(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
++ (vec_select:V8HI
++ (match_operand:V8HI 1 "memory_operand" "Z")
++ (parallel [(const_int 7) (const_int 6)
++ (const_int 5) (const_int 4)
++ (const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
++ "lxvh8x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_ld_elemrev_v16qi"
++ [(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
++ (vec_select:V16QI
++ (match_operand:V16QI 1 "memory_operand" "Z")
++ (parallel [(const_int 15) (const_int 14)
++ (const_int 13) (const_int 12)
++ (const_int 11) (const_int 10)
++ (const_int 9) (const_int 8)
++ (const_int 7) (const_int 6)
++ (const_int 5) (const_int 4)
++ (const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
++ "lxvb16x %x0,%y1"
++ [(set_attr "type" "vecload")])
++
++(define_insn "vsx_st_elemrev_v2df"
++ [(set (match_operand:V2DF 0 "memory_operand" "=Z")
++ (vec_select:V2DF
++ (match_operand:V2DF 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V2DFmode) && !BYTES_BIG_ENDIAN"
++ "stxvd2x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
++(define_insn "vsx_st_elemrev_v2di"
++ [(set (match_operand:V2DI 0 "memory_operand" "=Z")
++ (vec_select:V2DI
++ (match_operand:V2DI 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V2DImode) && !BYTES_BIG_ENDIAN"
++ "stxvd2x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
++(define_insn "vsx_st_elemrev_v4sf"
++ [(set (match_operand:V4SF 0 "memory_operand" "=Z")
++ (vec_select:V4SF
++ (match_operand:V4SF 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V4SFmode) && !BYTES_BIG_ENDIAN"
++ "stxvw4x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
++(define_insn "vsx_st_elemrev_v4si"
++ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
++ (vec_select:V4SI
++ (match_operand:V4SI 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V4SImode) && !BYTES_BIG_ENDIAN"
++ "stxvw4x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
++(define_insn "vsx_st_elemrev_v8hi"
++ [(set (match_operand:V8HI 0 "memory_operand" "=Z")
++ (vec_select:V8HI
++ (match_operand:V8HI 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 7) (const_int 6)
++ (const_int 5) (const_int 4)
++ (const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
++ "stxvh8x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
++(define_insn "vsx_st_elemrev_v16qi"
++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
++ (vec_select:V16QI
++ (match_operand:V16QI 1 "vsx_register_operand" "wa")
++ (parallel [(const_int 15) (const_int 14)
++ (const_int 13) (const_int 12)
++ (const_int 11) (const_int 10)
++ (const_int 9) (const_int 8)
++ (const_int 7) (const_int 6)
++ (const_int 5) (const_int 4)
++ (const_int 3) (const_int 2)
++ (const_int 1) (const_int 0)])))]
++ "VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
++ "stxvb16x %x1,%y0"
++ [(set_attr "type" "vecstore")])
++
+
+ ;; VSX vector floating point arithmetic instructions. The VSX scalar
+ ;; instructions are now combined with the insn for the traditional floating
+@@ -1583,10 +1699,15 @@
{
rtx op0 = operands[0];
rtx op1 = operands[1];
@@ -793735,7 +800219,7 @@ Index: gcc/config/rs6000/vsx.md
emit_insn (gen_vsx_xvcvdpsxds (op0, tmp));
DONE;
})
-@@ -1607,10 +1612,15 @@
+@@ -1607,10 +1728,15 @@
{
rtx op0 = operands[0];
rtx op1 = operands[1];
@@ -793755,11 +800239,392 @@ Index: gcc/config/rs6000/vsx.md
emit_insn (gen_vsx_xvcvdpuxds (op0, tmp));
DONE;
})
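A minimal sketch of what the element-reversing expanders above are for, assuming a powerpc64le compiler with VSX enabled; vec_xl comes from altivec.h (see the altivec.h hunk further down) and is expected to expand through patterns like vsx_ld_elemrev_v4si so that vector elements keep the order of the underlying array:

  /* Sketch only, not part of the patch.  */
  #include <altivec.h>

  int a[4] = { 1, 2, 3, 4 };

  vector int
  load_in_array_order (void)
  {
    /* Result element 0 is 1 ... element 3 is 4, on both big- and
       little-endian targets.  */
    return vec_xl (0, a);
  }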
+Index: gcc/config/rs6000/rs6000.h
+===================================================================
+--- a/src/gcc/config/rs6000/rs6000.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/rs6000.h (.../branches/gcc-6-branch)
+@@ -615,6 +615,7 @@
+ #define MASK_MULTIPLE OPTION_MASK_MULTIPLE
+ #define MASK_NO_UPDATE OPTION_MASK_NO_UPDATE
+ #define MASK_P8_VECTOR OPTION_MASK_P8_VECTOR
++#define MASK_P9_VECTOR OPTION_MASK_P9_VECTOR
+ #define MASK_POPCNTB OPTION_MASK_POPCNTB
+ #define MASK_POPCNTD OPTION_MASK_POPCNTD
+ #define MASK_PPC_GFXOPT OPTION_MASK_PPC_GFXOPT
+@@ -655,6 +656,11 @@
+ #define MASK_PROTOTYPE OPTION_MASK_PROTOTYPE
+ #endif
+
++#ifdef TARGET_MODULO
++#define RS6000_BTM_MODULO OPTION_MASK_MODULO
++#endif
++
++
+ /* For power systems, we want to enable Altivec and VSX builtins even if the
+ user did not use -maltivec or -mvsx to allow the builtins to be used inside
+ of #pragma GCC target or the target attribute to change the code level for a
+@@ -2638,7 +2644,9 @@
+
+ #define RS6000_BTC_MISC 0x00000000 /* No special attributes. */
+ #define RS6000_BTC_CONST 0x00000100 /* uses no global state. */
+-#define RS6000_BTC_PURE 0x00000200 /* reads global state/mem. */
++#define RS6000_BTC_PURE 0x00000200 /* reads global
++ state/mem and does
++ not modify global state. */
+ #define RS6000_BTC_FP 0x00000400 /* depends on rounding mode. */
+ #define RS6000_BTC_ATTR_MASK 0x00000700 /* Mask of the attributes. */
+
+@@ -2660,6 +2668,7 @@
+ #define RS6000_BTM_ALTIVEC MASK_ALTIVEC /* VMX/altivec vectors. */
+ #define RS6000_BTM_VSX MASK_VSX /* VSX (vector/scalar). */
+ #define RS6000_BTM_P8_VECTOR MASK_P8_VECTOR /* ISA 2.07 vector. */
++#define RS6000_BTM_P9_VECTOR MASK_P9_VECTOR /* ISA 3.00 vector. */
+ #define RS6000_BTM_CRYPTO MASK_CRYPTO /* crypto funcs. */
+ #define RS6000_BTM_HTM MASK_HTM /* hardware TM funcs. */
+ #define RS6000_BTM_SPE MASK_STRING /* E500 */
+@@ -2673,10 +2682,12 @@
+ #define RS6000_BTM_DFP MASK_DFP /* Decimal floating point. */
+ #define RS6000_BTM_HARD_FLOAT MASK_SOFT_FLOAT /* Hardware floating point. */
+ #define RS6000_BTM_LDBL128 MASK_MULTIPLE /* 128-bit long double. */
++#define RS6000_BTM_64BIT MASK_64BIT /* 64-bit addressing. */
+
+ #define RS6000_BTM_COMMON (RS6000_BTM_ALTIVEC \
+ | RS6000_BTM_VSX \
+ | RS6000_BTM_P8_VECTOR \
++ | RS6000_BTM_P9_VECTOR \
+ | RS6000_BTM_CRYPTO \
+ | RS6000_BTM_FRE \
+ | RS6000_BTM_FRES \
+@@ -2691,6 +2702,7 @@
+
+ /* Define builtin enum index. */
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
+@@ -2703,6 +2715,7 @@
+ #undef RS6000_BUILTIN_S
+ #undef RS6000_BUILTIN_X
+
++#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) ENUM,
+ #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) ENUM,
+ #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) ENUM,
+ #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) ENUM,
+@@ -2722,6 +2735,7 @@
+ RS6000_BUILTIN_COUNT
+ };
+
++#undef RS6000_BUILTIN_0
+ #undef RS6000_BUILTIN_1
+ #undef RS6000_BUILTIN_2
+ #undef RS6000_BUILTIN_3
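The RS6000_BUILTIN_0 addition above plugs a no-argument variant into GCC's X-macro scheme for builtins. A self-contained sketch of that scheme with made-up names (the real list lives in rs6000-builtin.def, which is included after the macros are redefined for each use):

  /* Sketch only; the MY_BUILTIN_* names are placeholders.  */
  #define MY_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) ENUM,
  #define MY_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) ENUM,

  enum my_builtins
  {
    MY_BUILTIN_0 (MY_BUILTIN_DARN, "darn", 0, 0, 0)     /* no-argument form */
    MY_BUILTIN_1 (MY_BUILTIN_VNEGW, "vnegw", 0, 0, 0)   /* one-argument form */
    MY_BUILTIN_COUNT
  };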
+Index: gcc/config/rs6000/altivec.md
+===================================================================
+--- a/src/gcc/config/rs6000/altivec.md (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/altivec.md (.../branches/gcc-6-branch)
+@@ -58,6 +58,7 @@
+ UNSPEC_VSUM2SWS
+ UNSPEC_VSUMSWS
+ UNSPEC_VPERM
++ UNSPEC_VPERMR
+ UNSPEC_VPERM_UNS
+ UNSPEC_VRFIN
+ UNSPEC_VCFUX
+@@ -73,6 +74,9 @@
+ UNSPEC_VUNPACK_LO_SIGN_DIRECT
+ UNSPEC_VUPKHPX
+ UNSPEC_VUPKLPX
++ UNSPEC_DARN
++ UNSPEC_DARN_32
++ UNSPEC_DARN_RAW
+ UNSPEC_DST
+ UNSPEC_DSTT
+ UNSPEC_DSTST
+@@ -189,6 +193,13 @@
+ (KF "FLOAT128_VECTOR_P (KFmode)")
+ (TF "FLOAT128_VECTOR_P (TFmode)")])
+
++;; Specific iterator for parity which does not have a byte/half-word form, but
++;; does have a quad word form
++(define_mode_iterator VParity [V4SI
++ V2DI
++ V1TI
++ (TI "TARGET_VSX_TIMODE")])
++
+ (define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
+ (define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
+ (define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
+@@ -203,6 +214,9 @@
+ (define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
+ (define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])
+
++;; Vector negate
++(define_mode_iterator VNEG [V4SI V2DI])
++
+ ;; Vector move instructions.
+ (define_insn "*altivec_mov<mode>"
+ [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,*Y,*r,*r,v,v,*r")
+@@ -1949,32 +1963,30 @@
+
+ ;; Slightly prefer vperm, since the target does not overlap the source
+ (define_insn "*altivec_vperm_<mode>_internal"
+- [(set (match_operand:VM 0 "register_operand" "=v,?wo,?&wo")
+- (unspec:VM [(match_operand:VM 1 "register_operand" "v,0,wo")
+- (match_operand:VM 2 "register_operand" "v,wo,wo")
+- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
++ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
++ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
++ (match_operand:VM 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERM))]
+ "TARGET_ALTIVEC"
+ "@
+ vperm %0,%1,%2,%3
+- xxperm %x0,%x2,%x3
+- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
++ xxperm %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+- (set_attr "length" "4,4,8")])
++ (set_attr "length" "4")])
+
+ (define_insn "altivec_vperm_v8hiv16qi"
+- [(set (match_operand:V16QI 0 "register_operand" "=v,?wo,?&wo")
+- (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,0,wo")
+- (match_operand:V8HI 2 "register_operand" "v,wo,wo")
+- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
++ [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
++ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,0")
++ (match_operand:V8HI 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERM))]
+ "TARGET_ALTIVEC"
+ "@
+ vperm %0,%1,%2,%3
+- xxperm %x0,%x2,%x3
+- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
++ xxperm %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+- (set_attr "length" "4,4,8")])
++ (set_attr "length" "4")])
+
+ (define_expand "altivec_vperm_<mode>_uns"
+ [(set (match_operand:VM 0 "register_operand" "")
+@@ -1992,18 +2004,17 @@
+ })
+
+ (define_insn "*altivec_vperm_<mode>_uns_internal"
+- [(set (match_operand:VM 0 "register_operand" "=v,?wo,?&wo")
+- (unspec:VM [(match_operand:VM 1 "register_operand" "v,0,wo")
+- (match_operand:VM 2 "register_operand" "v,wo,wo")
+- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
++ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
++ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
++ (match_operand:VM 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERM_UNS))]
+ "TARGET_ALTIVEC"
+ "@
+ vperm %0,%1,%2,%3
+- xxperm %x0,%x2,%x3
+- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
++ xxperm %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+- (set_attr "length" "4,4,8")])
++ (set_attr "length" "4")])
+
+ (define_expand "vec_permv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+@@ -2032,6 +2043,19 @@
+ FAIL;
+ })
+
++(define_insn "*altivec_vpermr_<mode>_internal"
++ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
++ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
++ (match_operand:VM 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
++ UNSPEC_VPERMR))]
++ "TARGET_P9_VECTOR"
++ "@
++ vpermr %0,%1,%2,%3
++ xxpermr %x0,%x2,%x3"
++ [(set_attr "type" "vecperm")
++ (set_attr "length" "4")])
++
+ (define_insn "altivec_vrfip" ; ceil
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+@@ -2690,20 +2714,28 @@
+ DONE;
+ })
+
++(define_insn "*p9_neg<mode>2"
++ [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
++ (neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
++ "TARGET_P9_VECTOR"
++ "vneg<VI_char> %0,%1"
++ [(set_attr "type" "vecsimple")])
++
+ (define_expand "neg<mode>2"
+- [(use (match_operand:VI 0 "register_operand" ""))
+- (use (match_operand:VI 1 "register_operand" ""))]
+- "TARGET_ALTIVEC"
+- "
++ [(set (match_operand:VI2 0 "register_operand" "")
++ (neg:VI2 (match_operand:VI2 1 "register_operand" "")))]
++ "<VI_unit>"
+ {
+- rtx vzero;
++ if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
++ {
++ rtx vzero;
+
+- vzero = gen_reg_rtx (GET_MODE (operands[0]));
+- emit_insn (gen_altivec_vspltis<VI_char> (vzero, const0_rtx));
+- emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
+-
+- DONE;
+-}")
++ vzero = gen_reg_rtx (GET_MODE (operands[0]));
++ emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
++ emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
++ DONE;
++ }
++})
+
+ (define_expand "udot_prod<mode>"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+@@ -2791,32 +2823,30 @@
+ "")
+
+ (define_insn "vperm_v8hiv4si"
+- [(set (match_operand:V4SI 0 "register_operand" "=v,?wo,?&wo")
+- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,0,wo")
+- (match_operand:V4SI 2 "register_operand" "v,wo,wo")
+- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
++ [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
++ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,0")
++ (match_operand:V4SI 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERMSI))]
+ "TARGET_ALTIVEC"
+ "@
+ vperm %0,%1,%2,%3
+- xxperm %x0,%x2,%x3
+- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
++ xxperm %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+- (set_attr "length" "4,4,8")])
++ (set_attr "length" "4")])
+
+ (define_insn "vperm_v16qiv8hi"
+- [(set (match_operand:V8HI 0 "register_operand" "=v,?wo,?&wo")
+- (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,0,wo")
+- (match_operand:V8HI 2 "register_operand" "v,wo,wo")
+- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
++ [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
++ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,0")
++ (match_operand:V8HI 2 "register_operand" "v,wo")
++ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERMHI))]
+ "TARGET_ALTIVEC"
+ "@
+ vperm %0,%1,%2,%3
+- xxperm %x0,%x2,%x3
+- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
++ xxperm %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+- (set_attr "length" "4,4,8")])
++ (set_attr "length" "4")])
+
+
+ (define_expand "vec_unpacku_hi_v16qi"
+@@ -3353,7 +3383,7 @@
+ }")
+
+
+-;; Power8 vector instructions encoded as Altivec instructions
++;; Power8/power9 vector instructions encoded as Altivec instructions
+
+ ;; Vector count leading zeros
+ (define_insn "*p8v_clz<mode>2"
+@@ -3364,6 +3394,15 @@
+ [(set_attr "length" "4")
+ (set_attr "type" "vecsimple")])
+
++;; Vector count trailing zeros
++(define_insn "*p9v_ctz<mode>2"
++ [(set (match_operand:VI2 0 "register_operand" "=v")
++ (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
++ "TARGET_P9_VECTOR"
++ "vctz<wd> %0,%1"
++ [(set_attr "length" "4")
++ (set_attr "type" "vecsimple")])
++
+ ;; Vector population count
+ (define_insn "*p8v_popcount<mode>2"
+ [(set (match_operand:VI2 0 "register_operand" "=v")
+@@ -3373,6 +3412,15 @@
+ [(set_attr "length" "4")
+ (set_attr "type" "vecsimple")])
+
++;; Vector parity
++(define_insn "*p9v_parity<mode>2"
++ [(set (match_operand:VParity 0 "register_operand" "=v")
++ (parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
++ "TARGET_P9_VECTOR"
++ "vprtyb<wd> %0,%1"
++ [(set_attr "length" "4")
++ (set_attr "type" "vecsimple")])
++
+ ;; Vector Gather Bits by Bytes by Doubleword
+ (define_insn "p8v_vgbbd"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+@@ -3540,6 +3588,27 @@
+ [(set_attr "length" "4")
+ (set_attr "type" "vecsimple")])
+
++(define_insn "darn_32"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(const_int 0)] UNSPEC_DARN_32))]
++ "TARGET_MODULO"
++ "darn %0,0"
++ [(set_attr "type" "integer")])
++
++(define_insn "darn_raw"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unspec:DI [(const_int 0)] UNSPEC_DARN_RAW))]
++ "TARGET_MODULO && TARGET_64BIT"
++ "darn %0,2"
++ [(set_attr "type" "integer")])
++
++(define_insn "darn"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unspec:DI [(const_int 0)] UNSPEC_DARN))]
++ "TARGET_MODULO && TARGET_64BIT"
++ "darn %0,1"
++ [(set_attr "type" "integer")])
++
+ (define_expand "bcd<bcd_add_sub>_<code>"
+ [(parallel [(set (reg:CCFP 74)
+ (compare:CCFP
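A hedged sketch of user-level code the new altivec.md patterns above are meant to serve on a -mcpu=power9 target; the __builtin_darn* spellings and their return types are assumptions here, not documentation:

  /* Sketch only, not part of the patch.  */
  unsigned int
  random_word (void)
  {
    return __builtin_darn_32 ();        /* darn rN,0 */
  }

  void
  negate_words (int *restrict d, const int *restrict s, int n)
  {
    for (int i = 0; i < n; i++)
      d[i] = -s[i];                     /* can now vectorize to vnegw */
  }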
Index: gcc/config/rs6000/rs6000.md
===================================================================
--- a/src/gcc/config/rs6000/rs6000.md (.../tags/gcc_6_1_0_release)
+++ b/src/gcc/config/rs6000/rs6000.md (.../branches/gcc-6-branch)
-@@ -12563,8 +12563,10 @@
+@@ -577,7 +577,9 @@
+ (V16QI "b")
+ (V8HI "h")
+ (V4SI "w")
+- (V2DI "d")])
++ (V2DI "d")
++ (V1TI "q")
++ (TI "q")])
+
+ ;; How many bits in this mode?
+ (define_mode_attr bits [(QI "8") (HI "16") (SI "32") (DI "64")])
+@@ -12563,8 +12565,10 @@
(set_attr "indexed" "no")])
;; A return instruction which the middle-end doesn't see.
@@ -793771,6 +800636,45 @@ Index: gcc/config/rs6000/rs6000.md
""
"blr"
[(set_attr "type" "jmpreg")])
+Index: gcc/config/rs6000/altivec.h
+===================================================================
+--- a/src/gcc/config/rs6000/altivec.h (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/rs6000/altivec.h (.../branches/gcc-6-branch)
+@@ -327,8 +327,8 @@
+ #define vec_sqrt __builtin_vec_sqrt
+ #define vec_vsx_ld __builtin_vec_vsx_ld
+ #define vec_vsx_st __builtin_vec_vsx_st
+-#define vec_xl __builtin_vec_vsx_ld
+-#define vec_xst __builtin_vec_vsx_st
++#define vec_xl __builtin_vec_xl
++#define vec_xst __builtin_vec_xst
+
+ /* Note, xxsldi and xxpermdi were added as __builtin_vsx_<xxx> functions
+ instead of __builtin_vec_<xxx> */
+@@ -384,6 +384,23 @@
+ #define vec_vupklsw __builtin_vec_vupklsw
+ #endif
+
++#ifdef _ARCH_PWR9
++/* Vector additions added in ISA 3.0. */
++#define vec_vctz __builtin_vec_vctz
++#define vec_cntlz __builtin_vec_vctz
++#define vec_vctzb __builtin_vec_vctzb
++#define vec_vctzd __builtin_vec_vctzd
++#define vec_vctzh __builtin_vec_vctzh
++#define vec_vctzw __builtin_vec_vctzw
++#define vec_vprtyb __builtin_vec_vprtyb
++#define vec_vprtybd __builtin_vec_vprtybd
++#define vec_vprtybw __builtin_vec_vprtybw
++
++#ifdef _ARCH_PPC64
++#define vec_vprtybq __builtin_vec_vprtybq
++#endif
++#endif
++
+ /* Predicates.
+ For C++, we use templates in order to allow non-parenthesized arguments.
+ For C, instead, we use macros since non-parenthesized arguments were
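A hedged example of the new _ARCH_PWR9 spellings defined in the altivec.h hunk above, built at -mcpu=power9; the exact overload set is an assumption based on the #defines shown there:

  /* Sketch only, not part of the patch.  */
  #include <altivec.h>

  vector unsigned int
  trailing_zeros (vector unsigned int x)
  {
    return vec_vctz (x);                /* vctzw */
  }

  vector unsigned int
  byte_parity (vector unsigned int x)
  {
    return vec_vprtybw (x);             /* vprtybw */
  }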
Index: gcc/config/rs6000/sysv4.h
===================================================================
--- a/src/gcc/config/rs6000/sysv4.h (.../tags/gcc_6_1_0_release)
@@ -793811,6 +800715,30 @@ Index: gcc/config/rs6000/sysv4.h
#define LINK_START_LINUX_SPEC ""
+Index: gcc/config/arm/arm-builtins.c
+===================================================================
+--- a/src/gcc/config/arm/arm-builtins.c (.../tags/gcc_6_1_0_release)
++++ b/src/gcc/config/arm/arm-builtins.c (.../branches/gcc-6-branch)
+@@ -2861,6 +2861,10 @@
+ int in_n, out_n;
+ bool out_unsigned_p = TYPE_UNSIGNED (type_out);
+
++ /* Can't provide any vectorized builtins when we can't use NEON. */
++ if (!TARGET_NEON)
++ return NULL_TREE;
++
+ if (TREE_CODE (type_out) != VECTOR_TYPE
+ || TREE_CODE (type_in) != VECTOR_TYPE)
+ return NULL_TREE;
+@@ -2875,7 +2879,7 @@
+ NULL_TREE is returned if no such builtin is available. */
+ #undef ARM_CHECK_BUILTIN_MODE
+ #define ARM_CHECK_BUILTIN_MODE(C) \
+- (TARGET_NEON && TARGET_FPU_ARMV8 \
++ (TARGET_FPU_ARMV8 \
+ && flag_unsafe_math_optimizations \
+ && ARM_CHECK_BUILTIN_MODE_1 (C))
+
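The arm-builtins.c change above hoists the NEON requirement into a single early return, so the per-mode ARM_CHECK_BUILTIN_MODE macro no longer re-tests it. Reduced to a self-contained sketch with made-up names standing in for the target flags:

  /* Sketch only; the flags stand in for TARGET_NEON and friends.  */
  int
  builtin_available (int have_neon, int have_fpu_armv8,
                     int unsafe_math, int mode_ok)
  {
    if (!have_neon)
      return 0;                         /* no vectorized builtins at all */
    return have_fpu_armv8 && unsafe_math && mode_ok;
  }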
Index: gcc/config/arm/arm.c
===================================================================
--- a/src/gcc/config/arm/arm.c (.../tags/gcc_6_1_0_release)
@@ -794117,6 +801045,7 @@ Index: gcc/config/visium/visium.c
+ rtx op6 = gen_highpart (SImode, op0);
+ rtx op7 = (op1 == const0_rtx ? op1 : gen_highpart (SImode, op1));
+ rtx op8;
++ rtx x, pat, flags;
+
+ /* If operand #2 is a small constant, then its high part is null. */
+ if (CONST_INT_P (op2))
@@ -794139,14 +801068,13 @@ Index: gcc/config/visium/visium.c
+ }
+
+ /* This is the {add,sub,neg}si3_insn_set_flags pattern. */
-+ rtx x;
+ if (op4 == const0_rtx)
+ x = gen_rtx_NEG (SImode, op5);
+ else
+ x = gen_rtx_fmt_ee (code, SImode, op4, op5);
-+ rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
++ pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+ XVECEXP (pat, 0, 0) = gen_rtx_SET (op3, x);
-+ rtx flags = gen_rtx_REG (CC_NOOVmode, FLAGS_REGNUM);
++ flags = gen_rtx_REG (CC_NOOVmode, FLAGS_REGNUM);
+ x = gen_rtx_COMPARE (CC_NOOVmode, shallow_copy_rtx (x), const0_rtx);
+ XVECEXP (pat, 0, 1) = gen_rtx_SET (flags, x);
+ emit_insn (pat);
@@ -794200,7 +801128,7 @@ Index: gcc/config/visium/visium.c
- REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op0), l);
- op0 = force_reg (SImode, GEN_INT (trunc_int_for_mode (l, SImode)));
+ REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op1), l);
-+ op1 = force_reg (SImode, GEN_INT (trunc_int_for_mode (l, SImode)));
++ op1 = force_reg (SImode, gen_int_mode (l, SImode));
}
}
else
@@ -794234,6 +801162,27 @@ Index: gcc/config/visium/visium.c
}
/* Expand a cstore of OPERANDS in MODE for EQ/NE/LTU/GTU/GEU/LEU. We generate
+@@ -3537,18 +3597,15 @@
+ int
+ visium_initial_elimination_offset (int from, int to ATTRIBUTE_UNUSED)
+ {
+- const int frame_size = visium_compute_frame_size (get_frame_size ());
+ const int save_fp = current_frame_info.save_fp;
+ const int save_lr = current_frame_info.save_lr;
+ const int lr_slot = current_frame_info.lr_slot;
+- const int local_frame_offset
+- = (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
+ int offset;
+
+ if (from == FRAME_POINTER_REGNUM)
+- offset = local_frame_offset;
++ offset = (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
+ else if (from == ARG_POINTER_REGNUM)
+- offset = frame_size;
++ offset = visium_compute_frame_size (get_frame_size ());
+ else
+ gcc_unreachable ();
+
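The last visium.c hunk above drops the precomputed locals and computes each elimination offset directly. A self-contained sketch of the resulting logic, with placeholder names and word size:

  /* Sketch only; UNITS_PER_WORD and the register numbers are placeholders.  */
  #include <assert.h>

  #define UNITS_PER_WORD 4
  enum { FRAME_POINTER_REGNUM, ARG_POINTER_REGNUM };

  int
  initial_elimination_offset (int from, int save_fp, int save_lr,
                              int lr_slot, int frame_size)
  {
    if (from == FRAME_POINTER_REGNUM)
      return (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
    assert (from == ARG_POINTER_REGNUM);
    return frame_size;
  }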
Index: gcc/ipa-icf.c
===================================================================
--- a/src/gcc/ipa-icf.c (.../tags/gcc_6_1_0_release)
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/reproducible/gcc-6.git