[Pkg-golang-commits] [golang] 01/04: Imported Upstream version 1.8.1

Michael Hudson-Doyle mwhudson-guest at moszumanska.debian.org
Mon Apr 10 01:47:28 UTC 2017


This is an automated email from the git hooks/post-receive script.

mwhudson-guest pushed a commit to branch golang-1.8
in repository golang.

commit fbef50273b324fe1adb5e8d00bc5a4fcd08bdf5a
Author: Michael Hudson-Doyle <michael.hudson at canonical.com>
Date:   Mon Apr 10 12:31:24 2017 +1200

    Imported Upstream version 1.8.1
---
 VERSION                                         |   2 +-
 doc/contrib.html                                |   1 +
 doc/contribute.html                             | 527 +++++++++++++++---------
 doc/devel/release.html                          |  12 +
 doc/go1.8.html                                  |   2 +-
 doc/install-source.html                         |   4 +-
 misc/cgo/testcshared/test.bash                  |   7 +
 src/cmd/compile/internal/gc/esc.go              |   9 +-
 src/cmd/compile/internal/gc/ssa.go              |   9 +-
 src/cmd/compile/internal/gc/testdata/arith.go   |  11 +
 src/cmd/compile/internal/gc/typecheck.go        |  11 +-
 src/cmd/compile/internal/gc/walk.go             |  41 +-
 src/cmd/compile/internal/ssa/deadcode.go        |   2 +-
 src/cmd/compile/internal/ssa/export_test.go     |   5 +
 src/cmd/compile/internal/ssa/gen/AMD64Ops.go    |  16 +-
 src/cmd/compile/internal/ssa/gen/ARM.rules      |   4 +-
 src/cmd/compile/internal/ssa/gen/ARM64.rules    | 120 +++---
 src/cmd/compile/internal/ssa/gen/ARM64Ops.go    |  20 +-
 src/cmd/compile/internal/ssa/gen/ARMOps.go      |   4 +-
 src/cmd/compile/internal/ssa/gen/MIPS.rules     |   3 +-
 src/cmd/compile/internal/ssa/gen/MIPSOps.go     |  16 +-
 src/cmd/compile/internal/ssa/gen/S390X.rules    |  12 +-
 src/cmd/compile/internal/ssa/gen/S390XOps.go    |  16 +-
 src/cmd/compile/internal/ssa/gen/genericOps.go  |  28 +-
 src/cmd/compile/internal/ssa/gen/main.go        |   4 +
 src/cmd/compile/internal/ssa/likelyadjust.go    |  23 +-
 src/cmd/compile/internal/ssa/loop_test.go       |  87 ++++
 src/cmd/compile/internal/ssa/op.go              |   1 +
 src/cmd/compile/internal/ssa/opGen.go           | 119 ++++--
 src/cmd/compile/internal/ssa/rewriteARM.go      |  12 +-
 src/cmd/compile/internal/ssa/rewriteARM64.go    | 120 +++---
 src/cmd/compile/internal/ssa/rewriteMIPS.go     |   7 +-
 src/cmd/compile/internal/ssa/rewriteS390X.go    |  65 ++-
 src/cmd/compile/internal/ssa/schedule.go        |  24 +-
 src/cmd/go/go_test.go                           |  18 +
 src/cmd/go/pkg.go                               |   4 -
 src/cmd/go/test.go                              |  21 +
 src/cmd/link/dwarf_test.go                      | 125 ++++++
 src/cmd/link/internal/ld/lib.go                 |  10 +-
 src/cmd/link/internal/ld/macho.go               |   2 +-
 src/cmd/link/internal/ld/macho_combine_dwarf.go |  52 ++-
 src/cmd/link/internal/ppc64/asm.go              |   8 +-
 src/cmd/link/linkbig_test.go                    |   2 +-
 src/crypto/tls/common.go                        |   1 +
 src/crypto/tls/tls_test.go                      |  90 +++-
 src/encoding/xml/marshal_test.go                |   7 +-
 src/encoding/xml/read.go                        |   3 +-
 src/encoding/xml/xml_test.go                    |  34 ++
 src/image/png/reader.go                         |   5 +
 src/image/png/reader_test.go                    |  10 +
 src/internal/testenv/testenv.go                 |   9 +
 src/internal/testenv/testenv_cgo.go             |  11 +
 src/net/http/http.go                            |   2 +-
 src/net/net.go                                  |   2 +-
 src/os/exec/exec_test.go                        |  10 +-
 src/reflect/all_test.go                         |  35 +-
 src/reflect/value.go                            |  26 +-
 src/runtime/crash_unix_test.go                  |  73 ++++
 src/runtime/export_test.go                      |  13 +
 src/runtime/runtime1.go                         |   6 +
 src/runtime/sema.go                             |   1 +
 src/runtime/signal_sighandler.go                |   2 +-
 src/text/template/multi_test.go                 |  37 +-
 src/text/template/template.go                   |   4 +-
 src/time/format_test.go                         |  41 +-
 test/fixedbugs/bug19403.go                      | 134 ++++++
 test/fixedbugs/issue19137.go                    |  35 ++
 test/fixedbugs/issue19168.go                    |  58 +++
 test/fixedbugs/issue19182.go                    |  36 ++
 test/fixedbugs/issue19201.go                    |  52 +++
 test/fixedbugs/issue19217.go                    |  39 ++
 test/fixedbugs/issue19323.go                    |  19 +
 test/fixedbugs/issue19743.go                    |  31 ++
 73 files changed, 1896 insertions(+), 516 deletions(-)

diff --git a/VERSION b/VERSION
index 854106e..41d06ba 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-go1.8
\ No newline at end of file
+go1.8.1
\ No newline at end of file
diff --git a/doc/contrib.html b/doc/contrib.html
index 7bf3d89..fd548d7 100644
--- a/doc/contrib.html
+++ b/doc/contrib.html
@@ -34,6 +34,7 @@ We encourage all Go users to subscribe to
 <p>A <a href="/doc/devel/release.html">summary</a> of the changes between Go releases. Notes for the major releases:</p>
 
 <ul>
+	<li><a href="/doc/go1.8">Go 1.8</a> <small>(February 2017)</small></li>
 	<li><a href="/doc/go1.7">Go 1.7</a> <small>(August 2016)</small></li>
 	<li><a href="/doc/go1.6">Go 1.6</a> <small>(February 2016)</small></li>
 	<li><a href="/doc/go1.5">Go 1.5</a> <small>(August 2015)</small></li>
diff --git a/doc/contribute.html b/doc/contribute.html
index f1a5b27..6977579 100644
--- a/doc/contribute.html
+++ b/doc/contribute.html
@@ -1,14 +1,12 @@
 <!--{
-	"Title": "Contribution Guidelines"
+	"Title": "Contribution Guide"
 }-->
 
-<h2 id="Introduction">Introduction</h2>
-
 <p>
-This document explains how to contribute changes to the Go project.
-It assumes you have followed the
-<a href="/doc/install/source">installation instructions</a> and
-have <a href="code.html">written and tested your code</a>.
+The Go project welcomes all contributors. The process of contributing
+to the Go project may be different from what you are used to in other projects.
+This document is intended as a guide to help you through the contribution
+process. This guide assumes you have a basic understanding of Git and Go.
 </p>
 
 <p>
@@ -20,103 +18,54 @@ see <a href="gccgo_contribute.html">Contributing to gccgo</a>.)
 Sensitive security-related issues should be reported to <a href="mailto:security at golang.org">security at golang.org</a>.
 </p>
 
-<h2 id="Design">Discuss your design</h2>
-
-<p>
-The project welcomes submissions but please let everyone know what
-you're working on if you want to change or add to the Go repositories.
-</p>
-
-<p>
-Before undertaking to write something new for the Go project,
-please <a href="https://golang.org/issue/new">file an issue</a>
-(or claim an <a href="https://golang.org/issues">existing issue</a>).
-Significant changes must go through the
-<a href="https://golang.org/s/proposal-process">change proposal process</a>
-before they can be accepted.
-</p>
-
-<p>
-This process gives everyone a chance to validate the design,
-helps prevent duplication of effort,
-and ensures that the idea fits inside the goals for the language and tools.
-It also checks that the design is sound before code is written;
-the code review tool is not the place for high-level discussions.
-</p>
-
-<p>
-When planning work, please note that the Go project follows a
-<a href="https://golang.org/wiki/Go-Release-Cycle">six-month
-development cycle</a>. The latter half of each cycle is a three-month
-feature freeze during which only bug fixes and doc updates are accepted.
-New work cannot be submitted during a feature freeze.
-</p>
-
-<h2 id="Testing">Testing redux</h2>
-
-<p>
-You've <a href="code.html">written and tested your code</a>, but
-before sending code out for review, run all the tests for the whole
-tree to make sure the changes don't break other packages or programs:
-</p>
-
-<pre>
-$ cd go/src
-$ ./all.bash
-</pre>
+<h1 id="contributor">Becoming a contributor</h1>
 
 <p>
-(To build under Windows use <code>all.bat</code>.)
+Before you can contribute to the Go project, you need to set up a few prerequisites.
+The Go project uses <a href="https://www.gerritcodereview.com/">Gerrit</a>, an open
+source online tool, to perform all code reviews.
+Gerrit uses your email address as a unique identifier.
+The Go project contributing flow is currently configured to work only with Google Accounts.
+You must go through the following process <em>prior to contributing</em>.
+You only need to do this once per Google Account.
 </p>
 
+<h2 id="auth">Configure Git to use Gerrit</h2>
 <p>
-After running for a while, the command should print
-"<code>ALL</code> <code>TESTS</code> <code>PASSED</code>".
+You'll need a web browser and a command line terminal.
+You should already have Git installed.
 </p>
 
-<h2 id="Code_review">Code review</h2>
-
 <p>
-Changes to Go must be reviewed before they are accepted,
-no matter who makes the change.
-A custom git command called <code>git-codereview</code>,
-discussed below, helps manage the code review process through a Google-hosted
-<a href="https://go-review.googlesource.com/">instance</a> of the code review
-system called <a href="https://www.gerritcodereview.com/">Gerrit</a>.
-</p>
-
-<h3 id="auth">Set up authentication for code review</h3>
-
-<p>
-Gerrit uses Google Accounts for authentication. If you don't have
-a Google Account, you can create an account which
+Gerrit uses Google Accounts for authentication.
+If you don't have a Google Account, you can create an account which
 <a href="https://www.google.com/accounts/NewAccount">includes
 a new Gmail email account</a> or create an account associated
 <a href="https://accounts.google.com/SignUpWithoutGmail">with your existing
 email address</a>.
 </p>
 
-<p>
-The email address associated with the Google Account you use will be recorded in
-the <a href="https://go.googlesource.com/go/+log/">change log</a>
-and in the <a href="/CONTRIBUTORS">contributors file</a>.
-</p>
+<h3>Step 1: Sign in to googlesource and generate a password</h3>
 
 <p>
-To set up your account in Gerrit, visit
-<a href="https://go.googlesource.com">go.googlesource.com</a>
+Visit <a href="https://go.googlesource.com">go.googlesource.com</a>
 and click on "Generate Password" in the page's top right menu bar.
+You will be redirected to accounts.google.com to sign in.
 </p>
 
+<h3>Step 2: Run the provided script</h3>
 <p>
-You will be redirected to accounts.google.com to sign in.
+After signing in, you are taken to a page on go.googlesource.com with the title "Configure Git".
+This page contains a personalized script which, when run locally, will configure Git
+to use your unique authentication key.
+This key is paired with one generated server-side, similar to how SSH keys work.
 </p>
 
 <p>
-Once signed in, you are returned back to go.googlesource.com to "Configure Git".
-Follow the instructions on the page.
-(If you are on a Windows computer, you should instead follow the instructions
-in the yellow box to run the command.)
+Copy and run this script locally in your command line terminal.
+(On a Windows computer using cmd, you should instead follow the instructions
+in the yellow box to run the command. If you are using git-bash, use the same
+script as on *nix.)
 </p>
 
 <p>
@@ -124,23 +73,25 @@ Your secret authentication token is now in a <code>.gitcookie</code> file
 and Git is configured to use this file.
 </p>
 
-<h3 id="gerrit">Register with Gerrit</h3>
+<h3 id="gerrit">Step 3: Register with Gerrit</h3>
 
 <p>
-Now that you have your authentication token,
-you need to register your account with Gerrit.
-To do this, visit
-<a href="https://go-review.googlesource.com/login/">
-go-review.googlesource.com/login/</a>. You will immediately be redirected
-to Google Accounts. Sign in using the same Google Account you used above.
-That is all that is required.
+Now that you have your authentication token, you need to register your
+account with Gerrit.
+To do this, visit <a href="https://go-review.googlesource.com/login/">
+go-review.googlesource.com/login/</a>.
+Sign in using the same Google Account you used above.
 </p>
 
-<h3 id="cla">Contributor License Agreement</h3>
+<h2 id="cla">Contributor License Agreement</h2>
+
+<h3 id="which_cla">Which CLA</h3>
+<p>
+Before sending your first change to the Go project
+you must have completed one of the following two CLAs.
+Which CLA you should sign depends on who owns the copyright to your work.
+</p>
 
-<p>Gerrit serves as the gatekeeper and uses your e-mail address as the key.
-To send your first change to the Go project from a given address,
-you must have completed one of the contributor license agreements:
 <ul>
 <li>
 If you are the copyright holder, you will need to agree to the
@@ -151,37 +102,49 @@ contributor license agreement</a>, which can be completed online.
 If your organization is the copyright holder, the organization
 will need to agree to the
 <a href="https://developers.google.com/open-source/cla/corporate">corporate
-contributor license agreement</a>.
-(If the copyright holder for your code has already completed the
-agreement in connection with another Google open source project,
-it does not need to be completed again.)
+contributor license agreement</a>.<br>
 </li>
 </ul>
 
 <p>
-You can use the links above to create and sign the contributor license agreement
-or you can show your current agreements and create new ones through the Gerrit
-interface.  <a href="https://go-review.googlesource.com/login/">Log into Gerrit</a>,
+<i>If the copyright holder for your contribution has already completed the
+agreement in connection with another Google open source project,
+it does not need to be completed again.</i>
+</p>
+
+<h3 id="signing_cla">Completing the CLA</h3>
+
+<p>
+You can see your currently signed agreements and sign new ones through the Gerrit
+interface.
+To do this, <a href="https://go-review.googlesource.com/login/">Log into Gerrit</a>,
 click your name in the upper-right, choose "Settings", then select "Agreements"
-from the topics on the left. If you do not have a signed agreement listed here,
+from the topics on the left.
+If you do not have a signed agreement listed here,
 you can create one by clicking "New Contributor Agreement" and following the steps.
 </p>
 
 <p>
-This rigmarole only needs to be done for your first submission for each email address.
+If the copyright holder for the code you are submitting changes — for example,
+if you start contributing code on behalf of a new company — please send email
+to golang-dev and let us know, so that we can make sure an appropriate agreement is
+completed and update the <code>AUTHORS</code> file.
 </p>
 
+<span id="Code_review"></span>
+<h1 id="prepare_dev_env">Preparing a Development Environment for Contributing</h1>
+
+<h2 id="git-codereview">Setting up Git for submission to Gerrit</h2>
 <p>
-If the copyright holder for the code you are submitting changes—for example,
-if you start contributing code on behalf of a new company—please send email
-to let us know, so that we can make sure an appropriate agreement is completed
-and update the <code>AUTHORS</code> file.
+Changes to Go must be reviewed before they are accepted, no matter who makes the change.
+A custom git command called <code>git-codereview</code>, discussed below,
+helps manage the code review process through a Google-hosted
+<a href="https://go-review.googlesource.com/">instance</a> Gerrit.
 </p>
 
-<h3 id="git-codereview">Install the git-codereview command</h3>
-
+<h3 id="git-codereview_install">Install the git-codereview command</h3>
 <p>
-Now install the <code>git-codereview</code> command by running,
+Install the <code>git-codereview</code> command by running,
 </p>
 
 <pre>
@@ -202,18 +165,28 @@ prints help text, not an error.
 </p>
 
 <p>
+On Windows, when using git-bash, you must make sure that
+<code>git-codereview.exe</code> is in your git exec-path.
+Run <code>git --exec-path</code> to discover the right location, then create a
+symbolic link or simply copy the executable from <code>$GOPATH/bin</code> to this directory.
+</p>
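+
+<p>
+As a rough sketch (the path printed by <code>git --exec-path</code> is only an
+example and will differ between installations), the copy can be done from
+git-bash like this:
+</p>
+
+<pre>
+$ git --exec-path
+C:/Program Files/Git/mingw64/libexec/git-core
+$ cp "$GOPATH/bin/git-codereview.exe" "$(git --exec-path)/"
+</pre>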
+
+<p>
 <b>Note to Git aficionados:</b>
 The <code>git-codereview</code> command is not required to
-upload and manage Gerrit code reviews. For those who prefer plain Git, the text
-below gives the Git equivalent of each git-codereview command.
+upload and manage Gerrit code reviews.
+For those who prefer plain Git, the text below gives the Git equivalent of
+each git-codereview command.
 </p>
 
-<p>If you do use plain
-Git, note that you still need the commit hooks that the git-codereview command
-configures; those hooks add a Gerrit <code>Change-Id</code> line to the commit
-message and check that all Go source files have been formatted with gofmt. Even
-if you intend to use plain Git for daily work, install the hooks in a new Git
-checkout by running <code>git-codereview</code> <code>hooks</code>.
+<p>
+If you do use plain Git, note that you still need the commit hooks that the
+git-codereview command configures; those hooks add a Gerrit
+<code>Change-Id</code> line to the commit message and check that all Go source
+files have been formatted with gofmt.
+Even if you intend to use plain Git for
+daily work, install the hooks in a new Git checkout by running
+<code>git-codereview</code> <code>hooks</code>.
 </p>
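+
+<p>
+For example, to install the hooks once per new checkout, run the following from
+inside the repository:
+</p>
+
+<pre>
+$ git-codereview hooks
+</pre>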
 
 <p>
@@ -264,7 +237,8 @@ To install them, copy this text into your Git configuration file
 	sync = codereview sync
 </pre>
 
-<h3 id="help">Understanding the git-codereview command</h3>
+<span id="help"></span>
+<h3 id="understanding_git-codereview">Understanding the git-codereview command</h3>
 
 <p>After installing the <code>git-codereview</code> command, you can run</p>
 
@@ -277,11 +251,70 @@ to learn more about its commands.
 You can also read the <a href="https://godoc.org/golang.org/x/review/git-codereview">command documentation</a>.
 </p>
 
-<h3 id="master">Switch to the master branch</h3>
+
+<h1 id="making_a_contribution">Making a Contribution</h1>
+
+<h2 id="Design">Discuss your design</h2>
+
+<p>
+The project welcomes submissions but please let everyone know what
+you're working on if you want to change or add to the Go repositories.
+</p>
+
+<p>
+Before undertaking to write something new for the Go project,
+please <a href="https://golang.org/issue/new">file an issue</a>
+(or claim an <a href="https://golang.org/issues">existing issue</a>).
+Significant changes must go through the
+<a href="https://golang.org/s/proposal-process">change proposal process</a>
+before they can be accepted.
+</p>
+
+<p>
+This process gives everyone a chance to validate the design,
+helps prevent duplication of effort,
+and ensures that the idea fits inside the goals for the language and tools.
+It also checks that the design is sound before code is written;
+the code review tool is not the place for high-level discussions.
+</p>
+
+<p>
+When planning work, please note that the Go project follows a <a
+href="https://golang.org/wiki/Go-Release-Cycle">six-month development cycle</a>.
+The latter half of each cycle is a three-month feature freeze during
+which only bug fixes and doc updates are accepted. New contributions can be
+sent during a feature freeze but will not be accepted until the freeze thaws.
+</p>
+
+<h2 id="making_a_change">Making a change</h2>
+
+<h3 id="checkout_go">Getting Go Source</h3>
+<p>
+First, you need to have a local copy of the source checked out from the correct
+repository.
+Because Go is built with Go, you will also likely need a working version
+of Go installed (some documentation changes may not need this).
+This should be a recent version of Go and can be obtained via any package or
+binary distribution, or you can build it from source.
+</p>
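+
+<p>
+For example, you can check whether a suitable Go is already installed
+(the version shown here is only an illustration):
+</p>
+
+<pre>
+$ go version
+go version go1.8 linux/amd64
+</pre>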
+
+<p>
+You should check out the Go source repository anywhere you want as long as it's
+outside of your <code>$GOPATH</code>.
+Go to a directory where you want the source to appear and run the following
+commands in a terminal.
+</p>
+
+<pre><code>
+$ git clone https://go.googlesource.com/go
+$ cd go
+</code></pre>
+
+<h3 id="master">Contributing to the main Go tree</h3>
 
 <p>
 Most Go installations use a release branch, but new changes should
-only be made based on the master branch.
+only be made based on the master branch. <br>
 (They may be applied later to a release branch as part of the release process,
 but most contributors won't do this themselves.)
 Before making a change, make sure you start on the master branch:
@@ -297,10 +330,61 @@ $ git sync
 <code>git</code> <code>pull</code> <code>-r</code>.)
 </p>
 
-<h3 id="change">Make a change</h3>
+<h3 id="subrepos">Contributing to subrepositories (golang.org/x/...)</h3>
+
+<p>
+If you are contributing a change to a subrepository, obtain the
+Go package using <code>go get</code>. For example, to contribute
+to <code>golang.org/x/oauth2</code>, check out the code by running:
+</p>
+
+<pre>
+$ go get -d golang.org/x/oauth2/...
+</pre>
+
+<p>
+Then, change your directory to the package's source directory
+(<code>$GOPATH/src/golang.org/x/oauth2</code>).
+</p>
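+
+<p>
+For example, assuming the default <code>$GOPATH</code> layout:
+</p>
+
+<pre>
+$ cd $GOPATH/src/golang.org/x/oauth2
+</pre>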
+
+<h3 id="change">Make your changes</h3>
+
+<p>
+The entire checked-out tree is editable.
+Make your changes as you see fit, ensuring that you create appropriate
+tests along with your changes. Test your changes as you go.
+</p>
+
+<h3 id="copyright">Copyright</h3>
+
+<p>
+Files in the Go repository don't list author names, both to avoid clutter
+and to avoid having to keep the lists up to date.
+Instead, your name will appear in the
+<a href="https://golang.org/change">change log</a> and in the <a
+href="/CONTRIBUTORS"><code>CONTRIBUTORS</code></a> file and perhaps the <a
+href="/AUTHORS"><code>AUTHORS</code></a> file.
+These files are periodically generated from the commit logs.
+The <a href="/AUTHORS"><code>AUTHORS</code></a> file defines who “The Go
+Authors”—the copyright holders—are.
+</p>
+
+<p>New files that you contribute should use the standard copyright header:</p>
+
+<pre>
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+</pre>
+
+<p>
+Files in the repository are copyright the year they are added.
+Do not update the copyright year on files that you change.
+</p>
+
+<h3 id="commit_changes">Commit your changes</h3>
 
 <p>
-The entire checked-out tree is writable.
 Once you have edited files, you must tell Git that they have been modified.
 You must also tell Git about any files that are added, removed, or renamed files.
 These operations are done with the usual Git commands,
@@ -311,16 +395,19 @@ and
 </p>
 
 <p>
-If you wish to checkpoint your work, or are ready to send the code out for review, run</p>
+Once you have the changes queued up, you will want to commit them.
+In the Go contribution workflow this is done with a <code>git change</code> command,
+which creates a local branch and commits the changes directly to that local branch.
+</p>
 
 <pre>
 $ git change <i><branch></i>
 </pre>
 
 <p>
-from any directory in your Go repository to commit the changes so far.
 The name <i><branch></i> is an arbitrary one you choose to identify the
-local branch containing your changes.
+local branch containing your changes and will not be used elsewhere.
+This is an offline operation and nothing will be sent to the server yet.
 </p>
 
 <p>
@@ -331,9 +418,11 @@ then <code>git</code> <code>commit</code>.)
 </p>
 
 <p>
-Git will open a change description file in your editor.
+Because <code>git commit</code> is the final step, Git will open an editor to ask for a
+commit message.
 (It uses the editor named by the <code>$EDITOR</code> environment variable,
 <code>vi</code> by default.)
+
 The file will look like:
 </p>
 
@@ -352,7 +441,7 @@ At the beginning of this file is a blank line; replace it
 with a thorough description of your change.
 The first line of the change description is conventionally a one-line
 summary of the change, prefixed by the primary affected package,
-and is used as the subject for code review mail.
+and is used as the subject for code review email.
 It should complete the sentence "This change modifies Go to _____."
 The rest of the description elaborates and should provide context for the
 change and explain what it does.
@@ -395,7 +484,7 @@ the command and move that file to a different branch.
 <p>
 The special notation "Fixes #159" associates the change with issue 159 in the
 <a href="https://golang.org/issue/159">Go issue tracker</a>.
-When this change is eventually submitted, the issue
+When this change is eventually applied, the issue
 tracker will automatically mark the issue as fixed.
 (There are several such conventions, described in detail in the
 <a href="https://help.github.com/articles/closing-issues-via-commit-messages/">GitHub Issue Tracker documentation</a>.)
@@ -407,6 +496,13 @@ save the file and exit the editor.
 </p>
 
 <p>
+You must have the <code>$EDITOR</code> environment variable set to an editor that
+works properly (exits cleanly) for this operation to succeed.
+If you run into any issues at this step, it's likely your editor isn't exiting cleanly.
+Try setting a different editor in your <code>$EDITOR</code> environment variable.
+</p>
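+
+<p>
+For example, to try a different editor for a single invocation (a sketch that
+assumes a Bourne-style shell and that <code>nano</code> is installed):
+</p>
+
+<pre>
+$ EDITOR=nano git change
+</pre>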
+
+<p>
 If you wish to do more editing, re-stage your changes using
 <code>git</code> <code>add</code>, and then run
 </p>
@@ -416,8 +512,8 @@ $ git change
 </pre>
 
 <p>
-to update the change description and incorporate the staged changes.  The
-change description contains a <code>Change-Id</code> line near the bottom,
+to update the change description and incorporate the staged changes.
+The change description contains a <code>Change-Id</code> line near the bottom,
 added by a Git commit hook during the initial
 <code>git</code> <code>change</code>.
 That line is used by Gerrit to match successive uploads of the same change.
@@ -429,35 +525,44 @@ Do not edit or delete it.
 runs <code>git</code> <code>commit</code> <code>--amend</code>.)
 </p>
 
-<h3 id="mail">Mail the change for review</h3>
+<h3 id="Testing">Testing</h3>
 
 <p>
-Once the change is ready, mail it out for review:
+You've <a href="code.html">written and tested your code</a>, but
+before sending code out for review, run all the tests for the whole
+tree to make sure the changes don't break other packages or programs:
 </p>
 
 <pre>
-$ git mail
+$ cd go/src
+$ ./all.bash
 </pre>
 
 <p>
-You can specify a reviewer or CC interested parties
-using the <code>-r</code> or <code>-cc</code> options.
-Both accept a comma-separated list of email addresses:
+(To build under Windows use <code>all.bat</code>.)
+</p>
+
+<p>
+After running for a while, the command should print
 </p>
 
 <pre>
-$ git mail -r joe at golang.org -cc mabel at example.com,math-nuts at swtch.com
+"ALL TESTS PASSED".
 </pre>
 
+<h3 id="mail">Send the change for review</h3>
+
 <p>
-Unless explicitly told otherwise, such as in the discussion leading
-up to sending in the change list, it's better not to specify a reviewer.
-All changes are automatically CC'ed to the
-<a href="https://groups.google.com/group/golang-codereviews">golang-codereviews at googlegroups.com</a>
-mailing list. If this is your first ever change, there may be a moderation
-delay before it appears on the mailing list, to prevent spam.
+Once the change is ready, send it for review.
+This is similar to a <code>git push</code> in a GitHub-style workflow.
+This is done via the <code>mail</code> alias set up earlier, which, despite its name, doesn't
+directly mail anything; it simply sends the change to Gerrit via <code>git push</code>.
 </p>
 
+<pre>
+$ git mail
+</pre>
+
 <p>
 (In Git terms, <code>git</code> <code>mail</code> pushes the local committed
 changes to Gerrit using <code>git</code> <code>push</code> <code>origin</code>
@@ -479,7 +584,76 @@ remote: New Changes:
 remote:   https://go-review.googlesource.com/99999 math: improved Sin, Cos and Tan precision for very large arguments
 </pre>
 
-<h3 id="review">Reviewing code</h3>
+<h3 id="troubleshooting">Troubleshooting</h3>
+
+<p>
+The most common reason the <code>git mail</code> command fails is that the
+email address used has not gone through the setup above.
+<br>
+If you see something like the following:
+</p>
+
+<pre>
+remote: Processing changes: refs: 1, done
+remote:
+remote: ERROR:  In commit ab13517fa29487dcf8b0d48916c51639426c5ee9
+remote: ERROR:  author email address XXXXXXXXXXXXXXXXXXX
+remote: ERROR:  does not match your user account.
+</pre>
+
+<p>
+You need to either add the listed email address to your CLA or configure this
+repository to use another, already approved email address.
+</p>
+
+<p>
+First, let's change the email address for this repository so this doesn't happen again.
+You can do so with the following command:
+</p>
+
+<pre>
+$ git config user.email email at address.com
+</pre>
+
+<p>
+Then change the previous commit to use this alternative email address.
+You can do that with:
+</p>
+
+<pre>
+$ git commit --amend --author="Author Name <email at address.com>"
+</pre>
+
+<p>
+Finally try to resend with:
+</p>
+
+<pre>
+$ git mail
+</pre>
+
+<h3 id="cc">Specifying a reviewer / CCing others</h3>
+
+<p>
+Unless explicitly told otherwise, such as in the discussion leading
+up to sending in the change list, it's better not to specify a reviewer.
+All changes are automatically CC'ed to the
+<a href="https://groups.google.com/group/golang-codereviews">golang-codereviews at googlegroups.com</a>
+mailing list. If this is your first ever change, there may be a moderation
+delay before it appears on the mailing list, to prevent spam.
+</p>
+
+<p>
+You can specify a reviewer or CC interested parties
+using the <code>-r</code> or <code>-cc</code> options.
+Both accept a comma-separated list of email addresses:
+</p>
+
+<pre>
+$ git mail -r joe at golang.org -cc mabel at example.com,math-nuts at swtch.com
+</pre>
+
+<h2 id="review">Going through the review process</h2>
 
 <p>
 Running <code>git</code> <code>mail</code> will send an email to you and the
@@ -491,7 +665,15 @@ You must reply through the web interface.
 (Unlike with the old Rietveld review system, replying by mail has no effect.)
 </p>
 
-<h3 id="revise">Revise and upload</h3>
+<h3 id="revise">Revise and resend</h3>
+
+<p>
+The Go contribution workflow is optimized for iterative revisions based on
+feedback.
+It is rare that an initial contribution will be ready to be applied as is.
+As you revise your contribution and resend it, Gerrit will retain a history of
+all the changes and comments at a single URL.
+</p>
 
 <p>
 You must respond to review comments through the web interface.
@@ -534,6 +716,8 @@ $ git sync
 <code>git</code> <code>pull</code> <code>-r</code>.)
 </p>
 
+<h3 id="resolving_conflicts">Resolving Conflicts</h3>
+
 <p>
 If files you were editing have changed, Git does its best to merge the
 remote changes into your local changes.
@@ -609,8 +793,8 @@ might turn up:
 <p>
 Git doesn't show it, but suppose the original text that both edits
 started with was 1e8; you changed it to 1e10 and the other change to 1e9,
-so the correct answer might now be 1e10.  First, edit the section
-to remove the markers and leave the correct code:
+so the correct answer might now be 1e10.
+First, edit the section to remove the markers and leave the correct code:
 </p>
 
 <pre>
@@ -639,10 +823,13 @@ restore the change commit.
 <h3 id="download">Reviewing code by others</h3>
 
 <p>
-You can import a change proposed by someone else into your local Git repository.
+As part of the review process, reviewers can propose changes directly (in the
+GitHub workflow this would be someone else attaching commits to a pull request).
+
+You can import these changes proposed by someone else into your local Git repository.
 On the Gerrit review page, click the "Download ▼" link in the upper right
-corner, copy the "Checkout" command and run it from your local Git repo.
-It should look something like this:
+corner, copy the "Checkout" command and run it from your local Git repo. It
+should look something like this:
 </p>
 
 <pre>
@@ -653,11 +840,11 @@ $ git fetch https://go.googlesource.com/review refs/changes/21/1221/1 &&
 To revert, change back to the branch you were working in.
 </p>
 
-<h3 id="submit">Submit the change after the review</h3>
+<h2 id="submit">Apply the change to the master branch</h2>
 
 <p>
 After the code has been <code>LGTM</code>'ed, an approver may
-submit it to the master branch using the Gerrit UI.
+apply it to the master branch using the Gerrit UI.
 There is a "Submit" button on the web page for the change
 that appears once the change is approved (marked +2).
 </p>
@@ -669,41 +856,13 @@ and the code review will be updated with a link to the change
 in the repository.
 Since the method used to integrate the changes is "Cherry Pick",
 the commit hashes in the repository will be changed by
-the submit operation.
+the "Submit" operation.
 </p>
 
-<h3 id="more">More information</h3>
+<h2 id="more">More information</h2>
 
 <p>
-In addition to the information here, the Go community maintains a <a href="https://golang.org/wiki/CodeReview">CodeReview</a> wiki page.
+In addition to the information here, the Go community maintains a <a
+href="https://golang.org/wiki/CodeReview">CodeReview</a> wiki page.
 Feel free to contribute to this page as you learn the review process.
 </p>
-
-<h2 id="copyright">Copyright</h2>
-
-<p>Files in the Go repository don't list author names,
-both to avoid clutter and to avoid having to keep the lists up to date.
-Instead, your name will appear in the
-<a href="https://golang.org/change">change log</a>
-and in the <a href="/CONTRIBUTORS"><code>CONTRIBUTORS</code></a> file
-and perhaps the <a href="/AUTHORS"><code>AUTHORS</code></a> file.
-</p>
-
-<p>The <a href="/CONTRIBUTORS"><code>CONTRIBUTORS</code></a> file
-defines who the Go contributors—the people—are;
-the <a href="/AUTHORS"><code>AUTHORS</code></a> file defines
-who “The Go Authors”—the copyright holders—are.
-These files will be periodically updated based on the commit logs.
-
-<p>Code that you contribute should use the standard copyright header:</p>
-
-<pre>
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-</pre>
-
-<p>
-Files in the repository are copyright the year they are added. It is not
-necessary to update the copyright year on files that you change.
-</p>
diff --git a/doc/devel/release.html b/doc/devel/release.html
index d046149..c4ca6a6 100644
--- a/doc/devel/release.html
+++ b/doc/devel/release.html
@@ -37,6 +37,18 @@ Go 1.8 is a major release of Go.
 Read the <a href="/doc/go1.8">Go 1.8 Release Notes</a> for more information.
 </p>
 
+<h3 id="go1.8.minor">Minor revisions</h3>
+
+<p>
+go1.8.1 (released 2017/04/07) includes fixes to the compiler, linker, runtime,
+documentation, <code>go</code> command and the <code>crypto/tls</code>,
+<code>encoding/xml</code>, <code>image/png</code>, <code>net</code>,
+<code>net/http</code>, <code>reflect</code>, <code>text/template</code>,
+and <code>time</code> packages.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.8.1">Go
+1.8.1 milestone</a> on our issue tracker for details.
+</p>
+
 <h2 id="go1.7">go1.7 (released 2016/08/15)</h2>
 
 <p>
diff --git a/doc/go1.8.html b/doc/go1.8.html
index cf4c669..611f5ee 100644
--- a/doc/go1.8.html
+++ b/doc/go1.8.html
@@ -1304,7 +1304,7 @@ crypto/x509: return error for missing SerialNumber (CL 27238)
         request must have the new
         <a href="/pkg/net/http/#Request"><code>Request.GetBody</code></a>
         field defined.
-        <a href="pkg/net/http/#NewRequest"><code>NewRequest</code></a>
+        <a href="/pkg/net/http/#NewRequest"><code>NewRequest</code></a>
         sets <code>Request.GetBody</code> automatically for common
         body types.
       </li>
diff --git a/doc/install-source.html b/doc/install-source.html
index 45c5bbb..8fb26a9 100644
--- a/doc/install-source.html
+++ b/doc/install-source.html
@@ -221,7 +221,7 @@ To build without <code>cgo</code>, set the environment variable
 Change to the directory that will be its parent
 and make sure the <code>go</code> directory does not exist.
 Then clone the repository and check out the latest release tag
-(<code class="versionTag">go1.8</code>, for example):</p>
+(<code class="versionTag">go1.8.1</code>, for example):</p>
 
 <pre>
 $ git clone https://go.googlesource.com/go
@@ -409,7 +409,7 @@ New releases are announced on the
 <a href="//groups.google.com/group/golang-announce">golang-announce</a>
 mailing list.
 Each announcement mentions the latest release tag, for instance,
-<code class="versionTag">go1.8</code>.
+<code class="versionTag">go1.8.1</code>.
 </p>
 
 <p>
diff --git a/misc/cgo/testcshared/test.bash b/misc/cgo/testcshared/test.bash
index 052ee0e..0315fb0 100755
--- a/misc/cgo/testcshared/test.bash
+++ b/misc/cgo/testcshared/test.bash
@@ -179,6 +179,13 @@ if test "$output" != "PASS"; then
     status=1
 fi
 
+if test "$libext" = "dylib"; then
+	# make sure dylibs are well-formed
+	if ! otool -l libgo*.dylib >/dev/null; then
+		status=1
+	fi
+fi
+
 if test $status = 0; then
     echo "ok"
 fi
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 4f37ff0..f4a70c4 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -477,6 +477,10 @@ func escAnalyze(all []*Node, recursive bool) {
 	for _, n := range all {
 		if n.Op == ODCLFUNC {
 			n.Esc = EscFuncPlanned
+			if Debug['m'] > 3 {
+				Dump("escAnalyze", n)
+			}
+
 		}
 	}
 
@@ -1675,7 +1679,10 @@ func (e *EscState) escflows(dst, src *Node, why *EscStep) {
 	}
 
 	// Don't bother building a graph for scalars.
-	if src.Type != nil && !haspointers(src.Type) {
+	if src.Type != nil && !haspointers(src.Type) && !isReflectHeaderDataField(src) {
+		if Debug['m'] > 3 {
+			fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
+		}
 		return
 	}
 
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index bf483f8..a0c1cf1 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -3470,8 +3470,13 @@ func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip
 	if s.WBLineno == 0 {
 		s.WBLineno = left.Line
 	}
-	s.storeTypeScalars(t, left, right, skip)
-	s.storeTypePtrsWB(t, left, right)
+	if t == Types[TUINTPTR] {
+		// Stores to reflect.{Slice,String}Header.Data.
+		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
+	} else {
+		s.storeTypeScalars(t, left, right, skip)
+		s.storeTypePtrsWB(t, left, right)
+	}
 
 	// WB ops will be expanded to branches at writebarrier phase.
 	// To make it easy, we put WB ops at the end of a block, so
diff --git a/src/cmd/compile/internal/gc/testdata/arith.go b/src/cmd/compile/internal/gc/testdata/arith.go
index d850ce2..f260d45 100644
--- a/src/cmd/compile/internal/gc/testdata/arith.go
+++ b/src/cmd/compile/internal/gc/testdata/arith.go
@@ -488,6 +488,17 @@ func testLrot() {
 			wantA, wantB, wantC, wantD, ", got", a, b, c, d)
 		failed = true
 	}
+	// Also test inputs with the top bit set, and make sure
+	// sub-word right shift has high bits cleared first.
+	// See issue #19270.
+	wantA, wantB, wantC, wantD = uint8(0xdf), uint16(0xdfff),
+		uint32(0xdfffffff), uint64(0xdfffffffffffffff)
+	a, b, c, d = lrot1_ssa(0xfe, 0xfffe, 0xfffffffe, 0xfffffffffffffffe)
+	if a != wantA || b != wantB || c != wantC || d != wantD {
+		println("lrot1_ssa(0xfe, 0xfffe, 0xfffffffe, 0xfffffffffffffffe)=",
+			wantA, wantB, wantC, wantD, ", got", a, b, c, d)
+		failed = true
+	}
 	x := lrot2_ssa(0xb0000001, 32)
 	wantX := uint32(0xb0000001)
 	if x != wantX {
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 5ec1c9e..7cfd951 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -1107,7 +1107,7 @@ OpSwitch:
 
 	case OSLICE, OSLICE3:
 		ok |= Erv
-		n.Left = typecheck(n.Left, top)
+		n.Left = typecheck(n.Left, Erv)
 		low, high, max := n.SliceBounds()
 		hasmax := n.Op.IsSlice3()
 		low = typecheck(low, Erv)
@@ -1119,6 +1119,10 @@ OpSwitch:
 		max = indexlit(max)
 		n.SetSliceBounds(low, high, max)
 		l := n.Left
+		if l.Type == nil {
+			n.Type = nil
+			return n
+		}
 		if l.Type.IsArray() {
 			if !islvalue(n.Left) {
 				yyerror("invalid operation %v (slice of unaddressable value)", n)
@@ -1131,12 +1135,7 @@ OpSwitch:
 			n.Left = typecheck(n.Left, Erv)
 			l = n.Left
 		}
-
 		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
 		var tp *Type
 		if t.IsString() {
 			if hasmax {
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 7c2e2ab..d080d21 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -2071,6 +2071,29 @@ func isstack(n *Node) bool {
 	return false
 }
 
+// isReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func isReflectHeaderDataField(l *Node) bool {
+	if l.Type != Types[TUINTPTR] {
+		return false
+	}
+
+	var tsym *Sym
+	switch l.Op {
+	case ODOT:
+		tsym = l.Left.Type.Sym
+	case ODOTPTR:
+		tsym = l.Left.Type.Elem().Sym
+	default:
+		return false
+	}
+
+	if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+		return false
+	}
+	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
 // Do we need a write barrier for the assignment l = r?
 func needwritebarrier(l *Node, r *Node) bool {
 	if !use_writebarrier {
@@ -2081,15 +2104,21 @@ func needwritebarrier(l *Node, r *Node) bool {
 		return false
 	}
 
-	// No write barrier for write of non-pointers.
-	dowidth(l.Type)
-
-	if !haspointers(l.Type) {
+	// No write barrier for write to stack.
+	if isstack(l) {
 		return false
 	}
 
-	// No write barrier for write to stack.
-	if isstack(l) {
+	// Package unsafe's documentation says storing pointers into
+	// reflect.SliceHeader and reflect.StringHeader's Data fields
+	// is valid, even though they have type uintptr (#19168).
+	if isReflectHeaderDataField(l) {
+		return true
+	}
+
+	// No write barrier for write of non-pointers.
+	dowidth(l.Type)
+	if !haspointers(l.Type) {
 		return false
 	}
 
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
index d75d2d5..ce786a9 100644
--- a/src/cmd/compile/internal/ssa/deadcode.go
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -64,7 +64,7 @@ func liveValues(f *Func, reachable []bool) []bool {
 			q = append(q, v)
 		}
 		for _, v := range b.Values {
-			if opcodeTable[v.Op].call && !live[v.ID] {
+			if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
 				live[v.ID] = true
 				q = append(q, v)
 			}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 3a9357d..ee6ed51 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -6,6 +6,7 @@ package ssa
 
 import (
 	"cmd/internal/obj"
+	"cmd/internal/obj/s390x"
 	"cmd/internal/obj/x86"
 	"testing"
 )
@@ -21,6 +22,10 @@ func testConfig(t testing.TB) *Config {
 	return NewConfig("amd64", DummyFrontend{t}, testCtxt, true)
 }
 
+func testConfigS390X(t testing.TB) *Config {
+	return NewConfig("s390x", DummyFrontend{t}, obj.Linknew(&s390x.Links390x), true)
+}
+
 // DummyFrontend is a test-only frontend.
 // It assumes 64 bit integers and pointers.
 type DummyFrontend struct {
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 5a293d1..016ed4f 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -522,15 +522,15 @@ func init() {
 		// store arg0 to arg1+auxint+aux, arg2=mem.
 		// These ops return a tuple of <old contents of *(arg1+auxint+aux), memory>.
 		// Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
-		{name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true},
-		{name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true},
+		{name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true},
+		{name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true},
 
 		// Atomic adds.
 		// *(arg1+auxint+aux) += arg0.  arg2=mem.
 		// Returns a tuple of <old contents of *(arg1+auxint+aux), memory>.
 		// Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
-		{name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true},
-		{name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true},
+		{name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true},
+		{name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true},
 		{name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
 		{name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
 
@@ -553,12 +553,12 @@ func init() {
 		//    JEQ ...
 		// but we can't do that because memory-using ops can't generate flags yet
 		// (flagalloc wants to move flag-generating instructions around).
-		{name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
-		{name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
+		{name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// Atomic memory updates.
-		{name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true}, // *(arg0+auxint+aux) &= arg1
-		{name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},   // *(arg0+auxint+aux) |= arg1
+		{name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // *(arg0+auxint+aux) &= arg1
+		{name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},   // *(arg0+auxint+aux) |= arg1
 	}
 
 	var AMD64blocks = []blockData{
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index bea9d6c..aad2587 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -178,8 +178,8 @@
 (Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
 
 (Lrot32 x [c]) -> (SRRconst x [32-c&31])
-(Lrot16 <t> x [c]) -> (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
-(Lrot8 <t> x [c]) -> (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+(Lrot16 <t> x [c]) -> (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to32 x) [16-c&15]))
+(Lrot8 <t> x [c]) -> (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to32 x) [8-c&7]))
 
 // constants
 (Const8 [val]) -> (MOVWconst [val])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 940c060..bc58a1f 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -534,69 +534,69 @@
 (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {sym} ptr mem)
 (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHload [off1+off2] {sym} ptr mem)
 (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHUload [off1+off2] {sym} ptr mem)
 (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWload [off1+off2] {sym} ptr mem)
 (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWUload [off1+off2] {sym} ptr mem)
 (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDload [off1+off2] {sym} ptr mem)
 (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVSload [off1+off2] {sym} ptr mem)
 (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVDload [off1+off2] {sym} ptr mem)
 
 (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
 	(MOVBstore [off1+off2] {sym} ptr val mem)
 (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHstore [off1+off2] {sym} ptr val mem)
 (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWstore [off1+off2] {sym} ptr val mem)
 (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDstore [off1+off2] {sym} ptr val mem)
 (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVSstore [off1+off2] {sym} ptr val mem)
 (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVDstore [off1+off2] {sym} ptr val mem)
 (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
 	(MOVBstorezero [off1+off2] {sym} ptr mem)
 (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHstorezero [off1+off2] {sym} ptr mem)
 (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWstorezero [off1+off2] {sym} ptr mem)
 (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDstorezero [off1+off2] {sym} ptr mem)
 
 (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
@@ -606,71 +606,71 @@
 	&& is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
 (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
 	&& is32Bit(off1+off2) ->
 	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
 	&& is32Bit(off1+off2) ->
 	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
 // store zero
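
Note that the ARM64.rules hunks above are a real restructuring of the folding condition, not just a reflow: the !isArg check moves out of the small-offset alternative and now guards the fold unconditionally, while !isAuto still applies only to the unaligned, small-immediate case; the MOVDaddr variants also check the merged symbol mergeSym(sym1,sym2) rather than sym1. A standalone sketch of the resulting predicate follows (hypothetical names, not compiler code; the real rules use the ssa package's is32Bit/isArg/isAuto helpers on the symbol aux value):

	package main

	import "fmt"

	// canFold is a hypothetical, standalone restatement of the condition in
	// the rules above for an access that must be align-byte aligned: the
	// combined offset must fit in 32 bits, offsets based on argument symbols
	// are never folded, and an unaligned offset is only folded when it fits
	// the small unscaled range and the symbol is not a stack auto.
	func canFold(off, align int64, symIsArg, symIsAuto bool) bool {
		if off != int64(int32(off)) || symIsArg { // is32Bit(off1+off2) && !isArg(sym)
			return false
		}
		return off%align == 0 || (off > -256 && off < 256 && !symIsAuto)
	}

	func main() {
		fmt.Println(canFold(16, 8, false, true))  // true: aligned
		fmt.Println(canFold(12, 8, false, false)) // true: unaligned but small, not arg/auto
		fmt.Println(canFold(12, 8, false, true))  // false: unaligned offset into an auto
		fmt.Println(canFold(16, 8, true, false))  // false: argument symbols are never folded now
	}
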
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index e8d5be2..0986ac6 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -456,16 +456,16 @@ func init() {
 
 		// atomic stores.
 		// store arg1 to arg0. arg2=mem. returns memory. auxint must be zero.
-		{name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true},
-		{name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true},
+		{name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true, hasSideEffects: true},
+		{name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic exchange.
 		// store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
 		// LDAXR	(Rarg0), Rout
 		// STLXR	Rarg1, (Rarg0), Rtmp
 		// CBNZ		Rtmp, -2(PC)
-		{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic add.
 		// *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
@@ -473,8 +473,8 @@ func init() {
 		// ADD		Rarg1, Rout
 		// STLXR	Rout, (Rarg0), Rtmp
 		// CBNZ		Rtmp, -3(PC)
-		{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic compare and swap.
 		// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
@@ -490,8 +490,8 @@ func init() {
 		// STLXR	Rarg2, (Rarg0), Rtmp
 		// CBNZ		Rtmp, -4(PC)
 		// CSET		EQ, Rout
-		{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic and/or.
 		// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
@@ -499,8 +499,8 @@ func init() {
 		// AND/OR	Rarg1, Rtmp
 		// STLXRB	Rtmp, (Rarg0), Rtmp
 		// CBNZ		Rtmp, -3(PC)
-		{name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true},
-		{name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "ORR", faultOnNilArg0: true},
+		{name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "ORR", faultOnNilArg0: true, hasSideEffects: true},
 	}
 
 	blocks := []blockData{
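
Throughout ARM64Ops.go (and the MIPS, S390X and generic op tables below) the atomic operations gain hasSideEffects: true, so optimization passes keep them alive even when nothing in the function consumes their result; the field itself is introduced in gen/main.go and op.go further down, with a comment pointing at atomic stores and issue #19182. A minimal illustration, assuming this is the class of code the flag protects (it is not taken from the commit):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var ready uint32

	// publish performs an atomic store whose memory result is not consumed by
	// any later operation in this function; hasSideEffects is what keeps such
	// ops from looking removable to the optimizer, even though other
	// goroutines can observe the write.
	func publish() {
		atomic.StoreUint32(&ready, 1)
	}

	func main() {
		publish()
		fmt.Println(atomic.LoadUint32(&ready)) // prints 1
	}
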
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index e296d06..6d9a90a 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -119,8 +119,8 @@ func init() {
 		fp01      = regInfo{inputs: nil, outputs: []regMask{fp}}
 		fp11      = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
 		fp1flags  = regInfo{inputs: []regMask{fp}}
-		fpgp      = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
-		gpfp      = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+		fpgp      = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
+		gpfp      = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
 		fp21      = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
 		fp2flags  = regInfo{inputs: []regMask{fp, fp}}
 		fpload    = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
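
The ARMOps.go change records that the ARM int-to-float and float-to-int conversions clobber F15, which they use as a temporary, by adding clobbers: buildReg("F15") to the fpgp and gpfp register classes. The regenerated opGen.go entries later in this diff show the corresponding mask value, 2147483648, which is simply bit 31:

	package main

	import "fmt"

	func main() {
		// buildReg("F15") corresponds to bit 31 of the ARM register mask,
		// matching the clobbers: 2147483648 lines in opGen.go below.
		fmt.Println(uint64(1) << 31) // 2147483648
	}
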
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 008f1b1..ad7a954 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -404,8 +404,7 @@
 				(ANDconst  <config.fe.TypeUInt32()> [3] ptr)))
 		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
 			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
-				(ANDconst <config.fe.TypeUInt32()> [3]
-					(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+				(ANDconst <config.fe.TypeUInt32()> [3] ptr))))) mem)
 
 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
 (AtomicOr8 ptr val mem) && config.BigEndian ->
diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
index 78b961f..3d88b71 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go
+++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
@@ -267,8 +267,8 @@ func init() {
 		// SYNC
 		// MOVW	Rarg1, (Rarg0)
 		// SYNC
-		{name: "LoweredAtomicStore", argLength: 3, reg: gpstore, faultOnNilArg0: true},
-		{name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true},
+		{name: "LoweredAtomicStore", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic exchange.
 		// store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
@@ -278,7 +278,7 @@ func init() {
 		// SC	Rtmp, (Rarg0)
 		// BEQ	Rtmp, -3(PC)
 		// SYNC
-		{name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic add.
 		// *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
@@ -289,8 +289,8 @@ func init() {
 		// BEQ	Rtmp, -3(PC)
 		// SYNC
 		// ADDU Rarg1, Rout
-		{name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic compare and swap.
 		// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
@@ -308,7 +308,7 @@ func init() {
 		// SC	Rout, (Rarg0)
 		// BEQ	Rout, -4(PC)
 		// SYNC
-		{name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// atomic and/or.
 		// *arg0 &= (|=) arg1. arg2=mem. returns memory.
@@ -318,8 +318,8 @@ func init() {
 		// SC	Rtmp, (Rarg0)
 		// BEQ	Rtmp, -3(PC)
 		// SYNC
-		{name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true},
-		{name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true},
+		{name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
 
 		// large or unaligned zeroing
 		// arg0 = address of memory to zero (in R1, changed as side effect)
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index c26515c..7ecea02 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -440,7 +440,7 @@
 (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
 (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
 
-(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg cond)) yes no)
+(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)
 
 // ***************************
 // Above: lowering rules
@@ -448,7 +448,15 @@
 // ***************************
 // TODO: Should the optimizations be a separate pass?
 
-// if a register move has only 1 use, just use the same register without emitting instruction
+// Fold unnecessary type conversions.
+(MOVDreg <t> x) && t.Compare(x.Type) == CMPeq -> x
+(MOVDnop <t> x) && t.Compare(x.Type) == CMPeq -> x
+
+// Propagate constants through type conversions.
+(MOVDreg (MOVDconst [c])) -> (MOVDconst [c])
+(MOVDnop (MOVDconst [c])) -> (MOVDconst [c])
+
+// If a register move has only 1 use, just use the same register without emitting instruction.
 // MOVDnop doesn't emit instruction, only for ensuring the type.
 (MOVDreg x) && x.Uses == 1 -> (MOVDnop x)
 
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 29383f6..6fdeb2e 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -429,14 +429,14 @@ func init() {
 
 		// Atomic stores. These are just normal stores.
 		// store arg1 to arg0+auxint+aux. arg2=mem.
-		{name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true},
-		{name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true},
+		{name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// Atomic adds.
 		// *(arg0+auxint+aux) += arg1.  arg2=mem.
 		// Returns a tuple of <old contents of *(arg0+auxint+aux), memory>.
-		{name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", faultOnNilArg0: true},
-		{name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", faultOnNilArg0: true},
+		{name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", faultOnNilArg0: true, hasSideEffects: true},
 		{name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
 		{name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
 
@@ -461,13 +461,13 @@ func init() {
 		//    BEQ ...
 		// but we can't do that because memory-using ops can't generate flags yet
 		// (flagalloc wants to move flag-generating instructions around).
-		{name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// Lowered atomic swaps, emulated using compare-and-swap.
 		// store arg1 to arg0+auxint+aux, arg2=mem.
-		{name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
-		{name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true},
+		{name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// find leftmost one
 		{
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index fe93e52..b1689df 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -460,20 +460,20 @@ var genericOps = []opData{
 	// Atomic loads return a new memory so that the loads are properly ordered
 	// with respect to other loads and stores.
 	// TODO: use for sync/atomic at some point.
-	{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"},         // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
-	{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"},         // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
-	{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"},       // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
-	{name: "AtomicStore32", argLength: 3, typ: "Mem"},                 // Store arg1 to *arg0.  arg2=memory.  Returns memory.
-	{name: "AtomicStore64", argLength: 3, typ: "Mem"},                 // Store arg1 to *arg0.  arg2=memory.  Returns memory.
-	{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem"},            // Store arg1 to *arg0.  arg2=memory.  Returns memory.
-	{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)"},     // Store arg1 to *arg0.  arg2=memory.  Returns old contents of *arg0 and new memory.
-	{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)"},     // Store arg1 to *arg0.  arg2=memory.  Returns old contents of *arg0 and new memory.
-	{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)"},          // Do *arg0 += arg1.  arg2=memory.  Returns sum and new memory.
-	{name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)"},          // Do *arg0 += arg1.  arg2=memory.  Returns sum and new memory.
-	{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)"}, // if *arg0==arg1, then set *arg0=arg2.  Returns true iff store happens and new memory.
-	{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)"}, // if *arg0==arg1, then set *arg0=arg2.  Returns true iff store happens and new memory.
-	{name: "AtomicAnd8", argLength: 3, typ: "Mem"},                    // *arg0 &= arg1.  arg2=memory.  Returns memory.
-	{name: "AtomicOr8", argLength: 3, typ: "Mem"},                     // *arg0 |= arg1.  arg2=memory.  Returns memory.
+	{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"},                               // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
+	{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"},                               // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
+	{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"},                             // Load from arg0.  arg1=memory.  Returns loaded value and new memory.
+	{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true},                 // Store arg1 to *arg0.  arg2=memory.  Returns memory.
+	{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true},                 // Store arg1 to *arg0.  arg2=memory.  Returns memory.
+	{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true},            // Store arg1 to *arg0.  arg2=memory.  Returns memory.
+	{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true},     // Store arg1 to *arg0.  arg2=memory.  Returns old contents of *arg0 and new memory.
+	{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true},     // Store arg1 to *arg0.  arg2=memory.  Returns old contents of *arg0 and new memory.
+	{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true},          // Do *arg0 += arg1.  arg2=memory.  Returns sum and new memory.
+	{name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true},          // Do *arg0 += arg1.  arg2=memory.  Returns sum and new memory.
+	{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2.  Returns true iff store happens and new memory.
+	{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2.  Returns true iff store happens and new memory.
+	{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true},                    // *arg0 &= arg1.  arg2=memory.  Returns memory.
+	{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true},                     // *arg0 |= arg1.  arg2=memory.  Returns memory.
 }
 
 //     kind           control    successors       implicit exit
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index 41199f7..19b904a 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -52,6 +52,7 @@ type opData struct {
 	faultOnNilArg0    bool  // this op will fault if arg0 is nil (and aux encodes a small offset)
 	faultOnNilArg1    bool  // this op will fault if arg1 is nil (and aux encodes a small offset)
 	usesScratch       bool  // this op requires scratch memory space
+	hasSideEffects    bool  // for "reasons", not to be eliminated.  E.g., atomic store, #19182.
 }
 
 type blockData struct {
@@ -208,6 +209,9 @@ func genOp() {
 			if v.usesScratch {
 				fmt.Fprintln(w, "usesScratch: true,")
 			}
+			if v.hasSideEffects {
+				fmt.Fprintln(w, "hasSideEffects: true,")
+			}
 			if a.name == "generic" {
 				fmt.Fprintln(w, "generic:true,")
 				fmt.Fprintln(w, "},") // close op
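
gen/main.go adds the hasSideEffects field to opData and teaches the generator to emit it, which is where the hasSideEffects: true lines in the regenerated opGen.go below come from. A self-contained sketch of the pattern, showing that optional fields are only printed when set, so ops that do not opt in generate exactly the same table entries as before:

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		var w bytes.Buffer
		hasSideEffects := true // set only for the atomic ops in the tables above
		fmt.Fprintln(&w, "{")
		fmt.Fprintln(&w, `name: "AtomicStore32",`)
		if hasSideEffects {
			fmt.Fprintln(&w, "hasSideEffects: true,")
		}
		fmt.Fprintln(&w, "},")
		fmt.Print(w.String())
	}
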
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
index 38a5e81..a4e5534 100644
--- a/src/cmd/compile/internal/ssa/likelyadjust.go
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -33,13 +33,24 @@ type loop struct {
 
 // outerinner records that outer contains inner
 func (sdom SparseTree) outerinner(outer, inner *loop) {
+	// There could be other outer loops found in some random order,
+	// locate the new outer loop appropriately among them.
 	oldouter := inner.outer
-	if oldouter == nil || sdom.isAncestorEq(oldouter.header, outer.header) {
-		inner.outer = outer
-		outer.isInner = false
-		if inner.containsCall {
-			outer.setContainsCall()
-		}
+	for oldouter != nil && sdom.isAncestor(outer.header, oldouter.header) {
+		inner = oldouter
+		oldouter = inner.outer
+	}
+	if outer == oldouter {
+		return
+	}
+	if oldouter != nil {
+		outer.outer = oldouter
+	}
+
+	inner.outer = outer
+	outer.isInner = false
+	if inner.containsCall {
+		outer.setContainsCall()
 	}
 }
 
diff --git a/src/cmd/compile/internal/ssa/loop_test.go b/src/cmd/compile/internal/ssa/loop_test.go
new file mode 100644
index 0000000..69a4962
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loop_test.go
@@ -0,0 +1,87 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+	"testing"
+)
+
+func TestLoopConditionS390X(t *testing.T) {
+	// Test that a simple loop condition does not generate a conditional
+	// move (issue #19227).
+	//
+	// MOVDLT is generated when Less64 is lowered but should be
+	// optimized into an LT branch.
+	//
+	// For example, compiling the following loop:
+	//
+	//   for i := 0; i < N; i++ {
+	//     sum += 3
+	//   }
+	//
+	// should generate assembly similar to:
+	//   loop:
+	//     CMP    R0, R1
+	//     BGE    done
+	//     ADD    $3, R4
+	//     ADD    $1, R1
+	//     BR     loop
+	//   done:
+	//
+	// rather than:
+	// loop:
+	//     MOVD   $0, R2
+	//     MOVD   $1, R3
+	//     CMP    R0, R1
+	//     MOVDLT R2, R3
+	//     CMPW   R2, $0
+	//     BNE    done
+	//     ADD    $3, R4
+	//     ADD    $1, R1
+	//     BR     loop
+	//   done:
+	//
+	c := testConfigS390X(t)
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpInitMem, TypeMem, 0, nil),
+			Valu("SP", OpSP, TypeUInt64, 0, nil),
+			Valu("Nptr", OpOffPtr, TypeInt64Ptr, 8, nil, "SP"),
+			Valu("ret", OpOffPtr, TypeInt64Ptr, 16, nil, "SP"),
+			Valu("N", OpLoad, TypeInt64, 0, nil, "Nptr", "mem"),
+			Valu("starti", OpConst64, TypeInt64, 0, nil),
+			Valu("startsum", OpConst64, TypeInt64, 0, nil),
+			Goto("b1")),
+		Bloc("b1",
+			Valu("phii", OpPhi, TypeInt64, 0, nil, "starti", "i"),
+			Valu("phisum", OpPhi, TypeInt64, 0, nil, "startsum", "sum"),
+			Valu("cmp1", OpLess64, TypeBool, 0, nil, "phii", "N"),
+			If("cmp1", "b2", "b3")),
+		Bloc("b2",
+			Valu("c1", OpConst64, TypeInt64, 1, nil),
+			Valu("i", OpAdd64, TypeInt64, 0, nil, "phii", "c1"),
+			Valu("c3", OpConst64, TypeInt64, 3, nil),
+			Valu("sum", OpAdd64, TypeInt64, 0, nil, "phisum", "c3"),
+			Goto("b1")),
+		Bloc("b3",
+			Valu("store", OpStore, TypeMem, 8, nil, "ret", "phisum", "mem"),
+			Exit("store")))
+	CheckFunc(fun.f)
+	Compile(fun.f)
+	CheckFunc(fun.f)
+
+	checkOpcodeCounts(t, fun.f, map[Op]int{
+		OpS390XMOVDLT:    0,
+		OpS390XMOVDGT:    0,
+		OpS390XMOVDLE:    0,
+		OpS390XMOVDGE:    0,
+		OpS390XMOVDEQ:    0,
+		OpS390XMOVDNE:    0,
+		OpS390XCMP:       1,
+		OpS390XCMPWconst: 0,
+	})
+
+	fun.f.Free()
+}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 4c3164f..37b2f74 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -34,6 +34,7 @@ type opInfo struct {
 	faultOnNilArg0    bool // this op will fault if arg0 is nil (and aux encodes a small offset)
 	faultOnNilArg1    bool // this op will fault if arg1 is nil (and aux encodes a small offset)
 	usesScratch       bool // this op requires scratch memory space
+	hasSideEffects    bool // for "reasons", not to be eliminated.  E.g., atomic store, #19182.
 }
 
 type inputInfo struct {
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 9d11d03..2938bd1 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -7307,6 +7307,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		resultInArg0:   true,
 		faultOnNilArg1: true,
+		hasSideEffects: true,
 		asm:            x86.AXCHGL,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7324,6 +7325,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		resultInArg0:   true,
 		faultOnNilArg1: true,
+		hasSideEffects: true,
 		asm:            x86.AXCHGQ,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7342,6 +7344,7 @@ var opcodeTable = [...]opInfo{
 		resultInArg0:   true,
 		clobberFlags:   true,
 		faultOnNilArg1: true,
+		hasSideEffects: true,
 		asm:            x86.AXADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7360,6 +7363,7 @@ var opcodeTable = [...]opInfo{
 		resultInArg0:   true,
 		clobberFlags:   true,
 		faultOnNilArg1: true,
+		hasSideEffects: true,
 		asm:            x86.AXADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7387,6 +7391,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         4,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            x86.ACMPXCHGL,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7407,6 +7412,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         4,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            x86.ACMPXCHGQ,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7427,6 +7433,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            x86.AANDB,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -7441,6 +7448,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            x86.AORB,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -10075,6 +10083,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
@@ -10088,6 +10097,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
@@ -10101,6 +10111,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
@@ -10114,6 +10125,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
@@ -10127,6 +10139,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
@@ -10140,6 +10153,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
@@ -10153,6 +10167,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
@@ -10166,6 +10181,7 @@ var opcodeTable = [...]opInfo{
 			inputs: []inputInfo{
 				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
 			},
+			clobbers: 2147483648, // F15
 			outputs: []outputInfo{
 				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
 			},
@@ -12657,6 +12673,7 @@ var opcodeTable = [...]opInfo{
 		name:           "STLR",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            arm64.ASTLR,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -12669,6 +12686,7 @@ var opcodeTable = [...]opInfo{
 		name:           "STLRW",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            arm64.ASTLRW,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -12682,6 +12700,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12697,6 +12716,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12712,6 +12732,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12727,6 +12748,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12743,6 +12765,7 @@ var opcodeTable = [...]opInfo{
 		resultNotInArgs: true,
 		clobberFlags:    true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12760,6 +12783,7 @@ var opcodeTable = [...]opInfo{
 		resultNotInArgs: true,
 		clobberFlags:    true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
@@ -12775,6 +12799,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicAnd8",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            arm64.AAND,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -12787,6 +12812,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicOr8",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            arm64.AORR,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -13977,6 +14003,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicStore",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
@@ -13988,6 +14015,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicStorezero",
 		argLen:         2,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
@@ -13999,6 +14027,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
@@ -14014,6 +14043,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          3,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
@@ -14030,6 +14060,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          2,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
@@ -14044,6 +14075,7 @@ var opcodeTable = [...]opInfo{
 		argLen:          4,
 		resultNotInArgs: true,
 		faultOnNilArg0:  true,
+		hasSideEffects:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
@@ -14059,6 +14091,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicAnd",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            mips.AAND,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -14071,6 +14104,7 @@ var opcodeTable = [...]opInfo{
 		name:           "LoweredAtomicOr",
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            mips.AOR,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19500,6 +19534,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19514,6 +19549,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.AMOVD,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19527,6 +19563,7 @@ var opcodeTable = [...]opInfo{
 		auxType:        auxSymOff,
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ALAA,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19543,6 +19580,7 @@ var opcodeTable = [...]opInfo{
 		auxType:        auxSymOff,
 		argLen:         3,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ALAAG,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19570,6 +19608,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         4,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ACS,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19590,6 +19629,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         4,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ACSG,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19610,6 +19650,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ACS,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -19628,6 +19669,7 @@ var opcodeTable = [...]opInfo{
 		argLen:         3,
 		clobberFlags:   true,
 		faultOnNilArg0: true,
+		hasSideEffects: true,
 		asm:            s390x.ACSG,
 		reg: regInfo{
 			inputs: []inputInfo{
@@ -21415,59 +21457,70 @@ var opcodeTable = [...]opInfo{
 		generic: true,
 	},
 	{
-		name:    "AtomicStore32",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicStore32",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicStore64",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicStore64",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicStorePtrNoWB",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicStorePtrNoWB",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicExchange32",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicExchange32",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicExchange64",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicExchange64",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicAdd32",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicAdd32",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicAdd64",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicAdd64",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicCompareAndSwap32",
-		argLen:  4,
-		generic: true,
+		name:           "AtomicCompareAndSwap32",
+		argLen:         4,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicCompareAndSwap64",
-		argLen:  4,
-		generic: true,
+		name:           "AtomicCompareAndSwap64",
+		argLen:         4,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicAnd8",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicAnd8",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 	{
-		name:    "AtomicOr8",
-		argLen:  3,
-		generic: true,
+		name:           "AtomicOr8",
+		argLen:         3,
+		hasSideEffects: true,
+		generic:        true,
 	},
 }
 
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 0f8a77f..234783b 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -14517,7 +14517,7 @@ func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Lrot16 <t> x [c])
 	// cond:
-	// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+	// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to32 x) [16-c&15]))
 	for {
 		t := v.Type
 		c := v.AuxInt
@@ -14529,7 +14529,9 @@ func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
 		v.AddArg(v0)
 		v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
 		v1.AuxInt = 16 - c&15
-		v1.AddArg(x)
+		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+		v2.AddArg(x)
+		v1.AddArg(v2)
 		v.AddArg(v1)
 		return true
 	}
@@ -14554,7 +14556,7 @@ func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Lrot8 <t> x [c])
 	// cond:
-	// result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+	// result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to32 x) [8-c&7]))
 	for {
 		t := v.Type
 		c := v.AuxInt
@@ -14566,7 +14568,9 @@ func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
 		v.AddArg(v0)
 		v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
 		v1.AuxInt = 8 - c&7
-		v1.AddArg(x)
+		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+		v2.AddArg(x)
+		v1.AddArg(v2)
 		v.AddArg(v1)
 		return true
 	}
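
The Lrot16/Lrot8 rewrites above (rewriteARM.go is generated from the gen/ARM.rules patterns) now zero-extend x before the right shift, so the low bits of the rotated result come from the 16- or 8-bit value itself rather than from whatever happens to occupy the upper bits of the 32-bit register. A plain-Go illustration of the intended semantics, not compiler code:

	package main

	import "fmt"

	// rot16 rotates a 16-bit value left by c. The right-shift operand must be
	// the zero-extended 16-bit value: shifting the raw 32-bit register right
	// would pull unrelated high bits into the low half, which is the bug the
	// added ZeroExt16to32/ZeroExt8to32 fixes.
	func rot16(x uint16, c uint) uint16 {
		c &= 15
		return x<<c | x>>(16-c)
	}

	func main() {
		fmt.Printf("%04x\n", rot16(0x8001, 1)) // 0003
	}
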
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 862c645..5c49636 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -2625,7 +2625,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (FMOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2637,7 +2637,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2648,7 +2648,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2661,7 +2661,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2677,7 +2677,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2690,7 +2690,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2702,7 +2702,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2716,7 +2716,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2733,7 +2733,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (FMOVSload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2745,7 +2745,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2756,7 +2756,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2769,7 +2769,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2785,7 +2785,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2798,7 +2798,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -2810,7 +2810,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2824,7 +2824,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -3997,7 +3997,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4009,7 +4009,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4020,7 +4020,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4033,7 +4033,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4100,7 +4100,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4113,7 +4113,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4125,7 +4125,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4139,7 +4139,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4178,7 +4178,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4190,7 +4190,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4201,7 +4201,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4214,7 +4214,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4230,7 +4230,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVHUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4242,7 +4242,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4253,7 +4253,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4266,7 +4266,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4369,7 +4369,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVHload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4381,7 +4381,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4392,7 +4392,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4405,7 +4405,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4532,7 +4532,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4545,7 +4545,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4557,7 +4557,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4571,7 +4571,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4694,7 +4694,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4706,7 +4706,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4717,7 +4717,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4730,7 +4730,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4746,7 +4746,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVWUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4758,7 +4758,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4769,7 +4769,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4782,7 +4782,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4909,7 +4909,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVWload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4921,7 +4921,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -4932,7 +4932,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4945,7 +4945,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -5120,7 +5120,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5133,7 +5133,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5145,7 +5145,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5159,7 +5159,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5240,7 +5240,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+	// cond: is32Bit(off1+off2) && !isArg(sym) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
 	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5252,7 +5252,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
@@ -5263,7 +5263,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
 		return true
 	}
 	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
 	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5276,7 +5276,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
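
Note on the ARM64 hunks above: the change tightens when an address offset may be folded into a MOVD-class load or store. The symbol must not be an argument at all, and the combined offset must either be width-aligned (scaled addressing) or small enough for the unscaled form and not refer to an auto. A minimal sketch that restates the new condition as a standalone predicate; offsetFoldable is not compiler code, just the condition factored into one function for illustration:

    package main

    import "fmt"

    // offsetFoldable restates the updated folding condition: off must fit in
    // 32 bits, the symbol must not be an argument, and the offset must be
    // width-aligned or small enough for the unscaled -256..255 form (which is
    // unsafe for autos, whose final frame offsets are not known yet).
    func offsetFoldable(off, width int64, isArg, isAuto bool) bool {
        if off != int64(int32(off)) || isArg { // is32Bit(off) && !isArg(sym)
            return false
        }
        return off%width == 0 || (off > -256 && off < 256 && !isAuto)
    }

    func main() {
        fmt.Println(offsetFoldable(264, 8, false, false)) // true: 264 is 8-aligned
        fmt.Println(offsetFoldable(257, 8, false, false)) // false: unaligned and outside -256..255
    }
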
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index cbe9f1b..76eac5b 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -712,7 +712,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value, config *Config) bool {
 	_ = b
 	// match: (AtomicAnd8  ptr val mem)
 	// cond: !config.BigEndian
-	// result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst  <config.fe.TypeUInt32()> [3] ptr))) 		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> 			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] 					(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+	// result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst  <config.fe.TypeUInt32()> [3] ptr))) 		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> 			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] ptr))))) mem)
 	for {
 		ptr := v.Args[0]
 		val := v.Args[1]
@@ -750,10 +750,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value, config *Config) bool {
 		v10.AuxInt = 3
 		v11 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
 		v11.AuxInt = 3
-		v12 := b.NewValue0(v.Line, OpMIPSXORconst, config.fe.TypeUInt32())
-		v12.AuxInt = 3
-		v12.AddArg(ptr)
-		v11.AddArg(v12)
+		v11.AddArg(ptr)
 		v10.AddArg(v11)
 		v8.AddArg(v10)
 		v7.AddArg(v8)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 0425ced..866cf50 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -524,6 +524,8 @@ func rewriteValueS390X(v *Value, config *Config) bool {
 		return rewriteValueS390X_OpS390XMOVDload(v, config)
 	case OpS390XMOVDloadidx:
 		return rewriteValueS390X_OpS390XMOVDloadidx(v, config)
+	case OpS390XMOVDnop:
+		return rewriteValueS390X_OpS390XMOVDnop(v, config)
 	case OpS390XMOVDreg:
 		return rewriteValueS390X_OpS390XMOVDreg(v, config)
 	case OpS390XMOVDstore:
@@ -10195,9 +10197,68 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value, config *Config) bool {
 	}
 	return false
 }
+func rewriteValueS390X_OpS390XMOVDnop(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (MOVDnop <t> x)
+	// cond: t.Compare(x.Type) == CMPeq
+	// result: x
+	for {
+		t := v.Type
+		x := v.Args[0]
+		if !(t.Compare(x.Type) == CMPeq) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (MOVDnop (MOVDconst [c]))
+	// cond:
+	// result: (MOVDconst [c])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c
+		return true
+	}
+	return false
+}
 func rewriteValueS390X_OpS390XMOVDreg(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
+	// match: (MOVDreg <t> x)
+	// cond: t.Compare(x.Type) == CMPeq
+	// result: x
+	for {
+		t := v.Type
+		x := v.Args[0]
+		if !(t.Compare(x.Type) == CMPeq) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (MOVDreg (MOVDconst [c]))
+	// cond:
+	// result: (MOVDconst [c])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c
+		return true
+	}
 	// match: (MOVDreg x)
 	// cond: x.Uses == 1
 	// result: (MOVDnop x)
@@ -18181,7 +18242,7 @@ func rewriteBlockS390X(b *Block, config *Config) bool {
 		}
 		// match: (If cond yes no)
 		// cond:
-		// result: (NE (CMPWconst [0] (MOVBZreg cond)) yes no)
+		// result: (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)
 		for {
 			v := b.Control
 			_ = v
@@ -18191,7 +18252,7 @@ func rewriteBlockS390X(b *Block, config *Config) bool {
 			b.Kind = BlockS390XNE
 			v0 := b.NewValue0(v.Line, OpS390XCMPWconst, TypeFlags)
 			v0.AuxInt = 0
-			v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+			v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeBool())
 			v1.AddArg(cond)
 			v0.AddArg(v1)
 			b.SetControl(v0)
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index f2a89d8..a455a9a 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -14,6 +14,7 @@ const (
 	ScoreMemory
 	ScoreDefault
 	ScoreFlags
+	ScoreSelectCall
 	ScoreControl // towards bottom of block
 )
 
@@ -110,10 +111,25 @@ func schedule(f *Func) {
 				// We want all the vardefs next.
 				score[v.ID] = ScoreVarDef
 			case v.Type.IsMemory():
-				// Schedule stores as early as possible. This tends to
-				// reduce register pressure. It also helps make sure
-				// VARDEF ops are scheduled before the corresponding LEA.
-				score[v.ID] = ScoreMemory
+				// Don't schedule independent operations after call to those functions.
+				// runtime.selectgo will jump to next instruction after this call,
+				// causing extra execution of those operations. Prevent it, by setting
+				// priority to high value.
+				if (v.Op == OpAMD64CALLstatic || v.Op == OpPPC64CALLstatic ||
+					v.Op == OpARMCALLstatic || v.Op == OpARM64CALLstatic ||
+					v.Op == Op386CALLstatic || v.Op == OpMIPS64CALLstatic ||
+					v.Op == OpS390XCALLstatic || v.Op == OpMIPSCALLstatic) &&
+					(isSameSym(v.Aux, "runtime.selectrecv") ||
+						isSameSym(v.Aux, "runtime.selectrecv2") ||
+						isSameSym(v.Aux, "runtime.selectsend") ||
+						isSameSym(v.Aux, "runtime.selectdefault")) {
+					score[v.ID] = ScoreSelectCall
+				} else {
+					// Schedule stores as early as possible. This tends to
+					// reduce register pressure. It also helps make sure
+					// VARDEF ops are scheduled before the corresponding LEA.
+					score[v.ID] = ScoreMemory
+				}
 			case v.Op == OpSelect0 || v.Op == OpSelect1:
 				// Schedule the pseudo-op of reading part of a tuple
 				// immediately after the tuple-generating op, since
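
Context for the schedule.go hunk above: static calls to runtime.selectrecv, runtime.selectrecv2, runtime.selectsend and runtime.selectdefault now get their own score so that unrelated memory operations are not scheduled after them, since runtime.selectgo resumes execution just past the chosen call. In Go 1.8 an ordinary select statement is what lowers to those runtime calls; a minimal example of the construct affected:

    package main

    import "fmt"

    func main() {
        ch1 := make(chan int, 1)
        ch2 := make(chan int, 1)
        ch1 <- 1

        // In Go 1.8 this two-case select compiles into static calls to
        // runtime.selectsend / runtime.selectrecv plus runtime.selectgo,
        // the calls the new ScoreSelectCall score applies to.
        select {
        case v := <-ch1:
            fmt.Println("received", v)
        case ch2 <- 2:
            fmt.Println("sent")
        }
    }
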
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 56de65c..fa78578 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -2227,6 +2227,24 @@ func TestTestEmpty(t *testing.T) {
 	}
 }
 
+func TestTestRaceInstall(t *testing.T) {
+	if !canRace {
+		t.Skip("no race detector")
+	}
+
+	tg := testgo(t)
+	defer tg.cleanup()
+	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+
+	tg.tempDir("pkg")
+	pkgdir := tg.path("pkg")
+	tg.run("install", "-race", "-pkgdir="+pkgdir, "std")
+	tg.run("test", "-race", "-pkgdir="+pkgdir, "-i", "-v", "empty/pkg")
+	if tg.getStderr() != "" {
+		t.Error("go test -i -race: rebuilds cached packages")
+	}
+}
+
 func TestBuildDryRunWithCgo(t *testing.T) {
 	if !canCgo {
 		t.Skip("skipping because cgo not enabled")
diff --git a/src/cmd/go/pkg.go b/src/cmd/go/pkg.go
index e40f942..575f187 100644
--- a/src/cmd/go/pkg.go
+++ b/src/cmd/go/pkg.go
@@ -955,10 +955,6 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
 		if p.Name == "main" && goarch == "arm" {
 			importPaths = append(importPaths, "math")
 		}
-		// In coverage atomic mode everything depends on sync/atomic.
-		if testCoverMode == "atomic" && (!p.Standard || (p.ImportPath != "runtime/cgo" && p.ImportPath != "runtime/race" && p.ImportPath != "sync/atomic")) {
-			importPaths = append(importPaths, "sync/atomic")
-		}
 	}
 
 	// Runtime and its internal packages depend on runtime/internal/sys,
diff --git a/src/cmd/go/test.go b/src/cmd/go/test.go
index 6482f0f..35250c9 100644
--- a/src/cmd/go/test.go
+++ b/src/cmd/go/test.go
@@ -545,6 +545,10 @@ func runTest(cmd *Command, args []string) {
 
 	// Prepare build + run + print actions for all packages being tested.
 	for _, p := range pkgs {
+		// sync/atomic import is inserted by the cover tool. See #18486
+		if testCover && testCoverMode == "atomic" {
+			ensureImport(p, "sync/atomic")
+		}
 		buildTest, runTest, printTest, err := b.test(p)
 		if err != nil {
 			str := err.Error()
@@ -636,6 +640,23 @@ func runTest(cmd *Command, args []string) {
 	b.do(root)
 }
 
+// ensures that package p imports the named package.
+func ensureImport(p *Package, pkg string) {
+	for _, d := range p.deps {
+		if d.Name == pkg {
+			return
+		}
+	}
+
+	a := loadPackage(pkg, &importStack{})
+	if a.Error != nil {
+		fatalf("load %s: %v", pkg, a.Error)
+	}
+	computeStale(a)
+
+	p.imports = append(p.imports, a)
+}
+
 func contains(x []string, s string) bool {
 	for _, t := range x {
 		if t == s {
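
Context for the test.go hunk above: ensureImport adds sync/atomic to each tested package when -covermode=atomic is in effect, because the cover tool rewrites counter updates into atomic adds. Roughly the shape of instrumented code in that mode, as a hand-written approximation rather than the tool's actual output:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // Approximation of an atomic-mode coverage counter: with
    // -covermode=atomic each basic block bumps its counter with
    // sync/atomic so tests running under -race stay free of data races.
    var coverCount [3]uint32

    func instrumentedBranch(x int) int {
        atomic.AddUint32(&coverCount[0], 1) // block 0 executed
        if x > 0 {
            atomic.AddUint32(&coverCount[1], 1) // block 1 executed
            return x
        }
        atomic.AddUint32(&coverCount[2], 1) // block 2 executed
        return -x
    }

    func main() {
        instrumentedBranch(7)
        fmt.Println(coverCount)
    }
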
diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go
new file mode 100644
index 0000000..32fa3a3
--- /dev/null
+++ b/src/cmd/link/dwarf_test.go
@@ -0,0 +1,125 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"cmd/internal/objfile"
+	"debug/dwarf"
+	"internal/testenv"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+func TestDWARF(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("DWARF is not supported on Windows")
+	}
+
+	testenv.MustHaveCGO(t)
+	testenv.MustHaveGoBuild(t)
+
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping on plan9; no DWARF symbol table in executables")
+	}
+
+	out, err := exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.Stale}}", "cmd/link").CombinedOutput()
+	if err != nil {
+		t.Fatalf("go list: %v\n%s", err, out)
+	}
+	if string(out) != "false\n" {
+		t.Fatalf("cmd/link is stale - run go install cmd/link")
+	}
+
+	tmpDir, err := ioutil.TempDir("", "go-link-TestDWARF")
+	if err != nil {
+		t.Fatal("TempDir failed: ", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	for _, prog := range []string{"testprog", "testprogcgo"} {
+		t.Run(prog, func(t *testing.T) {
+			exe := filepath.Join(tmpDir, prog+".exe")
+			dir := "../../runtime/testdata/" + prog
+			out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, dir).CombinedOutput()
+			if err != nil {
+				t.Fatalf("go build -o %v %v: %v\n%s", exe, dir, err, out)
+			}
+
+			f, err := objfile.Open(exe)
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer f.Close()
+
+			syms, err := f.Symbols()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			var addr uint64
+			for _, sym := range syms {
+				if sym.Name == "main.main" {
+					addr = sym.Addr
+					break
+				}
+			}
+			if addr == 0 {
+				t.Fatal("cannot find main.main in symbols")
+			}
+
+			d, err := f.DWARF()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// TODO: We'd like to use filepath.Join here.
+			// Also related: golang.org/issue/19784.
+			wantFile := path.Join(prog, "main.go")
+			wantLine := 24
+			r := d.Reader()
+			var line dwarf.LineEntry
+			for {
+				cu, err := r.Next()
+				if err != nil {
+					t.Fatal(err)
+				}
+				if cu == nil {
+					break
+				}
+				if cu.Tag != dwarf.TagCompileUnit {
+					r.SkipChildren()
+					continue
+				}
+				lr, err := d.LineReader(cu)
+				if err != nil {
+					t.Fatal(err)
+				}
+				for {
+					err := lr.Next(&line)
+					if err == io.EOF {
+						break
+					}
+					if err != nil {
+						t.Fatal(err)
+					}
+					if line.Address == addr {
+						if !strings.HasSuffix(line.File.Name, wantFile) || line.Line != wantLine {
+							t.Errorf("%#x is %s:%d, want %s:%d", addr, line.File.Name, line.Line, filepath.Join("...", wantFile), wantLine)
+						}
+						return
+					}
+				}
+			}
+			t.Fatalf("did not find file:line for %#x (main.main)", addr)
+		})
+	}
+}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 74d79d3..b624aa0 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -430,6 +430,10 @@ func (ctxt *Link) loadlib() {
 	// We now have enough information to determine the link mode.
 	determineLinkMode(ctxt)
 
+	if Headtype == obj.Hdarwin && Linkmode == LinkExternal {
+		*FlagTextAddr = 0
+	}
+
 	if Linkmode == LinkExternal && SysArch.Family == sys.PPC64 {
 		toc := ctxt.Syms.Lookup(".TOC.", 0)
 		toc.Type = obj.SDYNIMPORT
@@ -998,6 +1002,10 @@ func (l *Link) hostlink() {
 
 	if !*FlagS && !debug_s {
 		argv = append(argv, "-gdwarf-2")
+	} else if Headtype == obj.Hdarwin {
+		// Recent versions of macOS print
+		//	ld: warning: option -s is obsolete and being ignored
+		// so do not pass any arguments.
 	} else {
 		argv = append(argv, "-s")
 	}
@@ -1219,7 +1227,7 @@ func (l *Link) hostlink() {
 		l.Logf("%s", out)
 	}
 
-	if !*FlagS && !debug_s && Headtype == obj.Hdarwin {
+	if !*FlagS && !*FlagW && !debug_s && Headtype == obj.Hdarwin {
 		// Skip combining dwarf on arm.
 		if !SysArch.InFamily(sys.ARM, sys.ARM64) {
 			dsym := filepath.Join(*flagTmpdir, "go.dwarf")
diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go
index f3687da..1ab61b7 100644
--- a/src/cmd/link/internal/ld/macho.go
+++ b/src/cmd/link/internal/ld/macho.go
@@ -449,7 +449,7 @@ func Asmbmacho(ctxt *Link) {
 			ms.filesize = Segdata.Fileoff + Segdata.Filelen - Segtext.Fileoff
 		} else {
 			ms.filesize = Segdwarf.Fileoff + Segdwarf.Filelen - Segtext.Fileoff
-			ms.vsize = ms.filesize
+			ms.vsize = Segdwarf.Vaddr + Segdwarf.Length - Segtext.Vaddr
 		}
 	}
 
diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go
index dcc371e..8c6c4a8 100644
--- a/src/cmd/link/internal/ld/macho_combine_dwarf.go
+++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go
@@ -17,6 +17,7 @@ import (
 
 var realdwarf, linkseg *macho.Segment
 var dwarfstart, linkstart int64
+var dwarfaddr, linkaddr int64
 var linkoffset uint32
 
 const (
@@ -41,8 +42,7 @@ const (
 	LC_DYLIB_CODE_SIGN_DRS  = 0x2B
 	LC_ENCRYPTION_INFO_64   = 0x2C
 
-	dwarfMinAlign = 6  // 64 = 1 << 6
-	pageAlign     = 12 // 4096 = 1 << 12
+	pageAlign = 12 // 4096 = 1 << 12
 )
 
 type loadCmd struct {
@@ -157,16 +157,13 @@ func machoCombineDwarf(inexe, dsym, outexe string) error {
 	}
 
 	// Now copy the dwarf data into the output.
-	maxalign := uint32(dwarfMinAlign) //
-	for _, sect := range dwarfm.Sections {
-		if sect.Align > maxalign {
-			maxalign = sect.Align
-		}
-	}
-	dwarfstart = machoCalcStart(realdwarf.Offset, linkseg.Offset, maxalign)
+	// Kernel requires all loaded segments to be page-aligned in the file,
+	// even though we mark this one as being 0 bytes of virtual address space.
+	dwarfstart = machoCalcStart(realdwarf.Offset, linkseg.Offset, pageAlign)
 	if _, err = outf.Seek(dwarfstart, 0); err != nil {
 		return err
 	}
+	dwarfaddr = int64((linkseg.Addr + linkseg.Memsz + 1<<pageAlign - 1) &^ (1<<pageAlign - 1))
 
 	if _, err = dwarff.Seek(int64(realdwarf.Offset), 0); err != nil {
 		return err
@@ -277,10 +274,10 @@ func machoUpdateSegment(r loadCmdReader, seg, sect interface{}) error {
 		return err
 	}
 	// There shouldn't be any sections, but just to make sure...
-	return machoUpdateSections(r, segValue, reflect.ValueOf(sect), uint64(linkoffset))
+	return machoUpdateSections(r, segValue, reflect.ValueOf(sect), uint64(linkoffset), 0)
 }
 
-func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, delta uint64) error {
+func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset, deltaAddr uint64) error {
 	iseg := reflect.Indirect(seg)
 	nsect := iseg.FieldByName("Nsect").Uint()
 	if nsect == 0 {
@@ -291,16 +288,20 @@ func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, delta uint64)
 	isect := reflect.Indirect(sect)
 	offsetField := isect.FieldByName("Offset")
 	reloffField := isect.FieldByName("Reloff")
+	addrField := isect.FieldByName("Addr")
 	sectSize := int64(isect.Type().Size())
 	for i := uint64(0); i < nsect; i++ {
 		if err := r.ReadAt(sectOffset, sect.Interface()); err != nil {
 			return err
 		}
 		if offsetField.Uint() != 0 {
-			offsetField.SetUint(offsetField.Uint() + delta)
+			offsetField.SetUint(offsetField.Uint() + deltaOffset)
 		}
 		if reloffField.Uint() != 0 {
-			reloffField.SetUint(reloffField.Uint() + delta)
+			reloffField.SetUint(reloffField.Uint() + deltaOffset)
+		}
+		if addrField.Uint() != 0 {
+			addrField.SetUint(addrField.Uint() + deltaAddr)
 		}
 		if err := r.WriteAt(sectOffset, sect.Interface()); err != nil {
 			return err
@@ -327,15 +328,30 @@ func machoUpdateDwarfHeader(r *loadCmdReader) error {
 	if err := r.ReadAt(0, seg); err != nil {
 		return err
 	}
-	segValue := reflect.ValueOf(seg)
-	offset := reflect.Indirect(segValue).FieldByName("Offset")
+	segv := reflect.ValueOf(seg).Elem()
+
+	segv.FieldByName("Offset").SetUint(uint64(dwarfstart))
+	segv.FieldByName("Addr").SetUint(uint64(dwarfaddr))
+
+	deltaOffset := uint64(dwarfstart) - realdwarf.Offset
+	deltaAddr := uint64(dwarfaddr) - realdwarf.Addr
+
+	// If we set Memsz to 0 (and might as well set Addr too),
+	// then the xnu kernel will bail out halfway through load_segment
+	// and not apply further sanity checks that we might fail in the future.
+	// We don't need the DWARF information actually available in memory.
+	// But if we do this for buildmode=c-shared then the user-space
+	// dynamic loader complains about memsz < filesz. Sigh.
+	if Buildmode != BuildmodeCShared {
+		segv.FieldByName("Addr").SetUint(0)
+		segv.FieldByName("Memsz").SetUint(0)
+		deltaAddr = 0
+	}
 
-	delta := uint64(dwarfstart) - realdwarf.Offset
-	offset.SetUint(offset.Uint() + delta)
 	if err := r.WriteAt(0, seg); err != nil {
 		return err
 	}
-	return machoUpdateSections(*r, segValue, reflect.ValueOf(sect), delta)
+	return machoUpdateSections(*r, segv, reflect.ValueOf(sect), deltaOffset, deltaAddr)
 }
 
 func machoUpdateLoadCommand(r loadCmdReader, cmd interface{}, fields ...string) error {
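
Context for the macho_combine_dwarf.go hunks above: instead of using the largest DWARF section alignment, the combined DWARF segment is now placed at the next page boundary after the __LINKEDIT segment, both in the file and in the address space. A small sketch of that round-up computation; alignUp just factors out the bit trick used for dwarfaddr above:

    package main

    import "fmt"

    const pageAlign = 12 // 4096 = 1 << 12

    // alignUp rounds x up to the next multiple of 1<<shift.
    func alignUp(x uint64, shift uint) uint64 {
        page := uint64(1) << shift
        return (x + page - 1) &^ (page - 1)
    }

    func main() {
        // e.g. a __LINKEDIT segment ending at 0x100d234 is followed by a
        // DWARF segment starting at the next 4 KiB boundary, 0x100e000.
        fmt.Printf("%#x\n", alignUp(0x100d234, pageAlign))
    }
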
diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go
index 97107b9..cf2c532 100644
--- a/src/cmd/link/internal/ppc64/asm.go
+++ b/src/cmd/link/internal/ppc64/asm.go
@@ -87,6 +87,7 @@ func genplt(ctxt *ld.Link) {
 	//
 	// This assumes "case 1" from the ABI, where the caller needs
 	// us to save and restore the TOC pointer.
+	var stubs []*ld.Symbol
 	for _, s := range ctxt.Textp {
 		for i := range s.R {
 			r := &s.R[i]
@@ -108,7 +109,7 @@ func genplt(ctxt *ld.Link) {
 			if stub.Size == 0 {
 				// Need outer to resolve .TOC.
 				stub.Outer = s
-				ctxt.Textp = append(ctxt.Textp, stub)
+				stubs = append(stubs, stub)
 				gencallstub(ctxt, 1, stub, r.Sym)
 			}
 
@@ -121,6 +122,11 @@ func genplt(ctxt *ld.Link) {
 			ctxt.Arch.ByteOrder.PutUint32(s.P[r.Off+4:], o1)
 		}
 	}
+	// Put call stubs at the beginning (instead of the end).
+	// So when resolving the relocations to calls to the stubs,
+	// the addresses are known and trampolines can be inserted
+	// when necessary.
+	ctxt.Textp = append(stubs, ctxt.Textp...)
 }
 
 func genaddmoduledata(ctxt *ld.Link) {
diff --git a/src/cmd/link/linkbig_test.go b/src/cmd/link/linkbig_test.go
index d793c2f..960d89f 100644
--- a/src/cmd/link/linkbig_test.go
+++ b/src/cmd/link/linkbig_test.go
@@ -21,7 +21,7 @@ import (
 
 func TestLargeText(t *testing.T) {
 	if testing.Short() || (obj.GOARCH != "ppc64le" && obj.GOARCH != "ppc64" && obj.GOARCH != "arm") {
-		t.Skip("Skipping large text section test in short mode or on %s", obj.GOARCH)
+		t.Skipf("Skipping large text section test in short mode or on %s", obj.GOARCH)
 	}
 	testenv.MustHaveGoBuild(t)
 
diff --git a/src/crypto/tls/common.go b/src/crypto/tls/common.go
index 276d176..de833a9 100644
--- a/src/crypto/tls/common.go
+++ b/src/crypto/tls/common.go
@@ -563,6 +563,7 @@ func (c *Config) Clone() *Config {
 		Certificates:                c.Certificates,
 		NameToCertificate:           c.NameToCertificate,
 		GetCertificate:              c.GetCertificate,
+		GetClientCertificate:        c.GetClientCertificate,
 		GetConfigForClient:          c.GetConfigForClient,
 		VerifyPeerCertificate:       c.VerifyPeerCertificate,
 		RootCAs:                     c.RootCAs,
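
The one-line Clone fix above matters for any client that selects its certificate via a callback: before it, GetClientCertificate silently vanished from cloned configs. A minimal sketch of the affected usage:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func main() {
        base := &tls.Config{
            // Client-side certificate selection callback; before this fix,
            // Clone dropped it and cloned configs sent no client certificate.
            GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
                return &tls.Certificate{}, nil
            },
        }
        clone := base.Clone()
        fmt.Println("callback survived Clone:", clone.GetClientCertificate != nil)
    }
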
diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go
index 8933f4f..86812f0 100644
--- a/src/crypto/tls/tls_test.go
+++ b/src/crypto/tls/tls_test.go
@@ -13,13 +13,11 @@ import (
 	"io"
 	"io/ioutil"
 	"math"
-	"math/rand"
 	"net"
 	"os"
 	"reflect"
 	"strings"
 	"testing"
-	"testing/quick"
 	"time"
 )
 
@@ -568,11 +566,50 @@ func TestConnCloseWrite(t *testing.T) {
 	}
 }
 
-func TestClone(t *testing.T) {
+func TestCloneFuncFields(t *testing.T) {
+	const expectedCount = 5
+	called := 0
+
+	c1 := Config{
+		Time: func() time.Time {
+			called |= 1 << 0
+			return time.Time{}
+		},
+		GetCertificate: func(*ClientHelloInfo) (*Certificate, error) {
+			called |= 1 << 1
+			return nil, nil
+		},
+		GetClientCertificate: func(*CertificateRequestInfo) (*Certificate, error) {
+			called |= 1 << 2
+			return nil, nil
+		},
+		GetConfigForClient: func(*ClientHelloInfo) (*Config, error) {
+			called |= 1 << 3
+			return nil, nil
+		},
+		VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+			called |= 1 << 4
+			return nil
+		},
+	}
+
+	c2 := c1.Clone()
+
+	c2.Time()
+	c2.GetCertificate(nil)
+	c2.GetClientCertificate(nil)
+	c2.GetConfigForClient(nil)
+	c2.VerifyPeerCertificate(nil, nil)
+
+	if called != (1<<expectedCount)-1 {
+		t.Fatalf("expected %d calls but saw calls %b", expectedCount, called)
+	}
+}
+
+func TestCloneNonFuncFields(t *testing.T) {
 	var c1 Config
 	v := reflect.ValueOf(&c1).Elem()
 
-	rnd := rand.New(rand.NewSource(time.Now().Unix()))
 	typ := v.Type()
 	for i := 0; i < typ.NumField(); i++ {
 		f := v.Field(i)
@@ -581,40 +618,49 @@ func TestClone(t *testing.T) {
 			continue
 		}
 
-		// testing/quick can't handle functions or interfaces.
-		fn := typ.Field(i).Name
-		switch fn {
+		// testing/quick can't handle functions or interfaces and so
+		// isn't used here.
+		switch fn := typ.Field(i).Name; fn {
 		case "Rand":
 			f.Set(reflect.ValueOf(io.Reader(os.Stdin)))
-			continue
 		case "Time", "GetCertificate", "GetConfigForClient", "VerifyPeerCertificate", "GetClientCertificate":
-			// DeepEqual can't compare functions.
-			continue
+			// DeepEqual can't compare functions. If you add a
+			// function field to this list, you must also change
+			// TestCloneFuncFields to ensure that the func field is
+			// cloned.
 		case "Certificates":
 			f.Set(reflect.ValueOf([]Certificate{
 				{Certificate: [][]byte{{'b'}}},
 			}))
-			continue
 		case "NameToCertificate":
 			f.Set(reflect.ValueOf(map[string]*Certificate{"a": nil}))
-			continue
 		case "RootCAs", "ClientCAs":
 			f.Set(reflect.ValueOf(x509.NewCertPool()))
-			continue
 		case "ClientSessionCache":
 			f.Set(reflect.ValueOf(NewLRUClientSessionCache(10)))
-			continue
 		case "KeyLogWriter":
 			f.Set(reflect.ValueOf(io.Writer(os.Stdout)))
-			continue
-
-		}
-
-		q, ok := quick.Value(f.Type(), rnd)
-		if !ok {
-			t.Fatalf("quick.Value failed on field %s", fn)
+		case "NextProtos":
+			f.Set(reflect.ValueOf([]string{"a", "b"}))
+		case "ServerName":
+			f.Set(reflect.ValueOf("b"))
+		case "ClientAuth":
+			f.Set(reflect.ValueOf(VerifyClientCertIfGiven))
+		case "InsecureSkipVerify", "SessionTicketsDisabled", "DynamicRecordSizingDisabled", "PreferServerCipherSuites":
+			f.Set(reflect.ValueOf(true))
+		case "MinVersion", "MaxVersion":
+			f.Set(reflect.ValueOf(uint16(VersionTLS12)))
+		case "SessionTicketKey":
+			f.Set(reflect.ValueOf([32]byte{}))
+		case "CipherSuites":
+			f.Set(reflect.ValueOf([]uint16{1, 2}))
+		case "CurvePreferences":
+			f.Set(reflect.ValueOf([]CurveID{CurveP256}))
+		case "Renegotiation":
+			f.Set(reflect.ValueOf(RenegotiateOnceAsClient))
+		default:
+			t.Errorf("all fields must be accounted for, but saw unknown field %q", fn)
 		}
-		f.Set(q)
 	}
 
 	c2 := c1.Clone()
diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go
index 5ec7ece..0126146 100644
--- a/src/encoding/xml/marshal_test.go
+++ b/src/encoding/xml/marshal_test.go
@@ -2428,7 +2428,10 @@ func TestIssue16158(t *testing.T) {
 	err := Unmarshal([]byte(data), &struct {
 		B byte `xml:"b,attr,omitempty"`
 	}{})
-	if err == nil {
-		t.Errorf("Unmarshal: expected error, got nil")
+
+	// For Go 1.8.1 we've restored the old "no errors reported" behavior.
+	// We'll try again in Go 1.9 to report errors.
+	if err != nil {
+		t.Errorf("Unmarshal: expected nil, got error")
 	}
 }
diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
index 5a89d5f..799b57e 100644
--- a/src/encoding/xml/read.go
+++ b/src/encoding/xml/read.go
@@ -285,7 +285,8 @@ func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
 		return nil
 	}
 
-	return copyValue(val, []byte(attr.Value))
+	copyValue(val, []byte(attr.Value))
+	return nil
 }
 
 var (
diff --git a/src/encoding/xml/xml_test.go b/src/encoding/xml/xml_test.go
index dad6ed9..f43a5e7 100644
--- a/src/encoding/xml/xml_test.go
+++ b/src/encoding/xml/xml_test.go
@@ -797,3 +797,37 @@ func TestIssue12417(t *testing.T) {
 		}
 	}
 }
+
+func TestIssue19333(t *testing.T) {
+	type X struct {
+		XMLName Name `xml:"X"`
+		A       int  `xml:",attr"`
+		C       int
+	}
+
+	var tests = []struct {
+		input string
+		ok    bool
+	}{
+		{`<X></X>`, true},
+		{`<X A=""></X>`, true},
+		{`<X A="bad"></X>`, true},
+		{`<X></X>`, true},
+		{`<X><C></C></X>`, false},
+		{`<X><C/></X>`, false},
+		{`<X><C>bad</C></X>`, false},
+	}
+
+	for _, tt := range tests {
+		err := Unmarshal([]byte(tt.input), new(X))
+		if tt.ok {
+			if err != nil {
+				t.Errorf("%s: unexpected error: %v", tt.input, err)
+			}
+		} else {
+			if err == nil {
+				t.Errorf("%s: unexpected success", tt.input)
+			}
+		}
+	}
+}
diff --git a/src/image/png/reader.go b/src/image/png/reader.go
index 32f78f0..8299df5 100644
--- a/src/image/png/reader.go
+++ b/src/image/png/reader.go
@@ -612,6 +612,11 @@ func (d *decoder) readImagePass(r io.Reader, pass int, allocateOnly bool) (image
 				}
 			}
 		case cbG8:
+			if d.useTransparent {
+				// Match error from Go 1.7 and earlier.
+				// Go 1.9 will decode this properly.
+				return nil, chunkOrderError
+			}
 			copy(gray.Pix[pixOffset:], cdat)
 			pixOffset += gray.Stride
 		case cbGA8:
diff --git a/src/image/png/reader_test.go b/src/image/png/reader_test.go
index b9e9f4d..503b5dc 100644
--- a/src/image/png/reader_test.go
+++ b/src/image/png/reader_test.go
@@ -629,3 +629,13 @@ func BenchmarkDecodeRGB(b *testing.B) {
 func BenchmarkDecodeInterlacing(b *testing.B) {
 	benchmarkDecode(b, "testdata/benchRGB-interlace.png", 4)
 }
+
+func TestIssue19553(t *testing.T) {
+	var buf = []byte{
+		0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x85, 0x2c, 0x88, 0x80, 0x00, 0x00, 0x00, 0x02, 0x74, 0x52, 0x4e, 0x53, 0x00, 0xff, 0x5b, 0x91, 0x22, 0xb5, 0x00, 0x00, 0x00, 0x02, 0x62, 0x4b, 0x47, 0x44, 0x00, 0xff, 0x87, 0x8f, 0xcc, 0xbf, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0a, 0xf0, 0x00, 0x00, 0x0a, 0xf0, 0x01, 0x42, 0xac, 0x34, 0x98 [...]
+	}
+	_, err := Decode(bytes.NewReader(buf))
+	if err != chunkOrderError {
+		t.Errorf("Decode: expected chunkOrderError for transparent gray8, got %v", err)
+	}
+}
diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go
index 10384b6..f7c4ad2 100644
--- a/src/internal/testenv/testenv.go
+++ b/src/internal/testenv/testenv.go
@@ -138,6 +138,15 @@ func MustHaveExternalNetwork(t *testing.T) {
 	}
 }
 
+var haveCGO bool
+
+// MustHaveCGO calls t.Skip if cgo is not available.
+func MustHaveCGO(t *testing.T) {
+	if !haveCGO {
+		t.Skipf("skipping test: no cgo")
+	}
+}
+
 // HasSymlink reports whether the current system can use os.Symlink.
 func HasSymlink() bool {
 	ok, _ := hasSymlink()
diff --git a/src/internal/testenv/testenv_cgo.go b/src/internal/testenv/testenv_cgo.go
new file mode 100644
index 0000000..e3d4d16
--- /dev/null
+++ b/src/internal/testenv/testenv_cgo.go
@@ -0,0 +1,11 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo
+
+package testenv
+
+func init() {
+	haveCGO = true
+}
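
MustHaveCGO follows the existing MustHave* helpers: the cgo-tagged file above flips haveCGO at init time, and tests skip themselves when it is false. A sketch of how a standard-library test would use it; internal/testenv is only importable from within GOROOT/src, so the package name here is purely illustrative:

    package mypkg_test

    import (
        "internal/testenv" // importable only inside the standard library tree
        "testing"
    )

    func TestNeedsCgo(t *testing.T) {
        testenv.MustHaveCGO(t)     // skips unless the cgo build tag set haveCGO
        testenv.MustHaveGoBuild(t) // skips when the go tool is unavailable
        // ... build and run a cgo-using binary here ...
    }
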
diff --git a/src/net/http/http.go b/src/net/http/http.go
index 826f7ff..b95ca89 100644
--- a/src/net/http/http.go
+++ b/src/net/http/http.go
@@ -20,7 +20,7 @@ const maxInt64 = 1<<63 - 1
 
 // aLongTimeAgo is a non-zero time, far in the past, used for
 // immediate cancelation of network operations.
-var aLongTimeAgo = time.Unix(233431200, 0)
+var aLongTimeAgo = time.Unix(1, 0)
 
 // TODO(bradfitz): move common stuff here. The other files have accumulated
 // generic http stuff in random places.
diff --git a/src/net/net.go b/src/net/net.go
index 81206ea..a8b5736 100644
--- a/src/net/net.go
+++ b/src/net/net.go
@@ -468,7 +468,7 @@ func (e *OpError) Error() string {
 var (
 	// aLongTimeAgo is a non-zero time, far in the past, used for
 	// immediate cancelation of dials.
-	aLongTimeAgo = time.Unix(233431200, 0)
+	aLongTimeAgo = time.Unix(1, 0)
 
 	// nonDeadline and noCancel are just zero values for
 	// readability with functions taking too many parameters.
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index 3433745..bd20dd4 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -262,9 +262,13 @@ func TestStdinCloseRace(t *testing.T) {
 		t.Fatalf("Start: %v", err)
 	}
 	go func() {
-		if err := cmd.Process.Kill(); err != nil {
-			t.Errorf("Kill: %v", err)
-		}
+		// We don't check the error return of Kill. It is
+		// possible that the process has already exited, in
+		// which case Kill will return an error "process
+		// already finished". The purpose of this test is to
+		// see whether the race detector reports an error; it
+		// doesn't matter whether this Kill succeeds or not.
+		cmd.Process.Kill()
 	}()
 	go func() {
 		// Send the wrong string, so that the child fails even
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 3eca293..7f8c129 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -1681,6 +1681,11 @@ func (p Point) GCMethod(k int) int {
 }
 
 // This will be index 3.
+func (p Point) NoArgs() {
+	// Exercise no-argument/no-result paths.
+}
+
+// This will be index 4.
 func (p Point) TotalDist(points ...Point) int {
 	tot := 0
 	for _, q := range points {
@@ -1709,6 +1714,15 @@ func TestMethod(t *testing.T) {
 		t.Errorf("Type MethodByName returned %d; want 275", i)
 	}
 
+	m, ok = TypeOf(p).MethodByName("NoArgs")
+	if !ok {
+		t.Fatalf("method by name failed")
+	}
+	n := len(m.Func.Call([]Value{ValueOf(p)}))
+	if n != 0 {
+		t.Errorf("NoArgs returned %d values; want 0", n)
+	}
+
 	i = TypeOf(&p).Method(1).Func.Call([]Value{ValueOf(&p), ValueOf(12)})[0].Int()
 	if i != 300 {
 		t.Errorf("Pointer Type Method returned %d; want 300", i)
@@ -1723,6 +1737,15 @@ func TestMethod(t *testing.T) {
 		t.Errorf("Pointer Type MethodByName returned %d; want 325", i)
 	}
 
+	m, ok = TypeOf(&p).MethodByName("NoArgs")
+	if !ok {
+		t.Fatalf("method by name failed")
+	}
+	n = len(m.Func.Call([]Value{ValueOf(&p)}))
+	if n != 0 {
+		t.Errorf("NoArgs returned %d values; want 0", n)
+	}
+
 	// Curried method of value.
 	tfunc := TypeOf((func(int) int)(nil))
 	v := ValueOf(p).Method(1)
@@ -1741,6 +1764,8 @@ func TestMethod(t *testing.T) {
 	if i != 375 {
 		t.Errorf("Value MethodByName returned %d; want 375", i)
 	}
+	v = ValueOf(p).MethodByName("NoArgs")
+	v.Call(nil)
 
 	// Curried method of pointer.
 	v = ValueOf(&p).Method(1)
@@ -1759,6 +1784,8 @@ func TestMethod(t *testing.T) {
 	if i != 425 {
 		t.Errorf("Pointer Value MethodByName returned %d; want 425", i)
 	}
+	v = ValueOf(&p).MethodByName("NoArgs")
+	v.Call(nil)
 
 	// Curried method of interface value.
 	// Have to wrap interface value in a struct to get at it.
@@ -1808,6 +1835,9 @@ func TestMethodValue(t *testing.T) {
 	if i != 275 {
 		t.Errorf("Value MethodByName returned %d; want 275", i)
 	}
+	v = ValueOf(p).MethodByName("NoArgs")
+	ValueOf(v.Interface()).Call(nil)
+	v.Interface().(func())()
 
 	// Curried method of pointer.
 	v = ValueOf(&p).Method(1)
@@ -1826,6 +1856,9 @@ func TestMethodValue(t *testing.T) {
 	if i != 325 {
 		t.Errorf("Pointer Value MethodByName returned %d; want 325", i)
 	}
+	v = ValueOf(&p).MethodByName("NoArgs")
+	ValueOf(v.Interface()).Call(nil)
+	v.Interface().(func())()
 
 	// Curried method of pointer to pointer.
 	pp := &p
@@ -1881,7 +1914,7 @@ func TestVariadicMethodValue(t *testing.T) {
 
 	// Curried method of value.
 	tfunc := TypeOf((func(...Point) int)(nil))
-	v := ValueOf(p).Method(3)
+	v := ValueOf(p).Method(4)
 	if tt := v.Type(); tt != tfunc {
 		t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc)
 	}
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 699ba69..387b9f3 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -630,8 +630,11 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
 	args := framePool.Get().(unsafe.Pointer)
 
 	// Copy in receiver and rest of args.
+	// Avoid constructing out-of-bounds pointers if there are no args.
 	storeRcvr(rcvr, args)
-	typedmemmovepartial(frametype, unsafe.Pointer(uintptr(args)+ptrSize), frame, ptrSize, argSize-ptrSize)
+	if argSize-ptrSize > 0 {
+		typedmemmovepartial(frametype, unsafe.Pointer(uintptr(args)+ptrSize), frame, ptrSize, argSize-ptrSize)
+	}
 
 	// Call.
 	call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))
@@ -641,15 +644,18 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
 	// a receiver) is different from the layout of the fn call, which has
 	// a receiver.
 	// Ignore any changes to args and just copy return values.
-	callerRetOffset := retOffset - ptrSize
-	if runtime.GOARCH == "amd64p32" {
-		callerRetOffset = align(argSize-ptrSize, 8)
-	}
-	typedmemmovepartial(frametype,
-		unsafe.Pointer(uintptr(frame)+callerRetOffset),
-		unsafe.Pointer(uintptr(args)+retOffset),
-		retOffset,
-		frametype.size-retOffset)
+	// Avoid constructing out-of-bounds pointers if there are no return values.
+	if frametype.size-retOffset > 0 {
+		callerRetOffset := retOffset - ptrSize
+		if runtime.GOARCH == "amd64p32" {
+			callerRetOffset = align(argSize-ptrSize, 8)
+		}
+		typedmemmovepartial(frametype,
+			unsafe.Pointer(uintptr(frame)+callerRetOffset),
+			unsafe.Pointer(uintptr(args)+retOffset),
+			retOffset,
+			frametype.size-retOffset)
+	}
 
 	// This is untyped because the frame is really a stack, even
 	// though it's a heap object.
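
The reflect/value.go guards above matter exactly when a reflected method call has no arguments and no results beyond the receiver, so that no pointer is formed just past the call frame. A minimal standalone program exercising that shape:

    package main

    import (
        "fmt"
        "reflect"
    )

    type point struct{ x, y int }

    // NoArgs has no parameters and no results, so the method-value call
    // frame holds only the receiver word; the new guards in callMethod
    // skip the zero-length copies in and out of that frame.
    func (point) NoArgs() {}

    func main() {
        p := point{1, 2}
        m := reflect.ValueOf(p).MethodByName("NoArgs")
        out := m.Call(nil)
        fmt.Println("results:", len(out)) // results: 0
    }
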
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 97deed8..182c84b 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -9,6 +9,7 @@ package runtime_test
 import (
 	"bytes"
 	"internal/testenv"
+	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -153,6 +154,78 @@ func loop(i int, c chan bool) {
 }
 `
 
+func TestPanicSystemstack(t *testing.T) {
+	// Test that GOTRACEBACK=crash prints both the system and user
+	// stack of other threads.
+
+	// The GOTRACEBACK=crash handler takes 0.1 seconds even if
+	// it's not writing a core file and potentially much longer if
+	// it is. Skip in short mode.
+	if testing.Short() {
+		t.Skip("Skipping in short mode (GOTRACEBACK=crash is slow)")
+	}
+
+	t.Parallel()
+	cmd := exec.Command(os.Args[0], "testPanicSystemstackInternal")
+	cmd = testEnv(cmd)
+	cmd.Env = append(cmd.Env, "GOTRACEBACK=crash")
+	pr, pw, err := os.Pipe()
+	if err != nil {
+		t.Fatal("creating pipe: ", err)
+	}
+	cmd.Stderr = pw
+	if err := cmd.Start(); err != nil {
+		t.Fatal("starting command: ", err)
+	}
+	defer cmd.Process.Wait()
+	defer cmd.Process.Kill()
+	if err := pw.Close(); err != nil {
+		t.Log("closing write pipe: ", err)
+	}
+	defer pr.Close()
+
+	// Wait for "x\nx\n" to indicate readiness.
+	buf := make([]byte, 4)
+	_, err = io.ReadFull(pr, buf)
+	if err != nil || string(buf) != "x\nx\n" {
+		t.Fatal("subprocess failed; output:\n", string(buf))
+	}
+
+	// Send SIGQUIT.
+	if err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {
+		t.Fatal("signaling subprocess: ", err)
+	}
+
+	// Get traceback.
+	tb, err := ioutil.ReadAll(pr)
+	if err != nil {
+		t.Fatal("reading traceback from pipe: ", err)
+	}
+
+	// Traceback should have two testPanicSystemstackInternal's
+	// and two blockOnSystemStackInternal's.
+	if bytes.Count(tb, []byte("testPanicSystemstackInternal")) != 2 {
+		t.Fatal("traceback missing user stack:\n", string(tb))
+	} else if bytes.Count(tb, []byte("blockOnSystemStackInternal")) != 2 {
+		t.Fatal("traceback missing system stack:\n", string(tb))
+	}
+}
+
+func init() {
+	if len(os.Args) >= 2 && os.Args[1] == "testPanicSystemstackInternal" {
+		// Get two threads running on the system stack with
+		// something recognizable in the stack trace.
+		runtime.GOMAXPROCS(2)
+		go testPanicSystemstackInternal()
+		testPanicSystemstackInternal()
+	}
+}
+
+func testPanicSystemstackInternal() {
+	runtime.BlockOnSystemStack()
+	os.Exit(1) // Should be unreachable.
+}
+
 func TestSignalExitStatus(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
 	exe, err := buildTestProg(t, "testprog")
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 9b76555..3b33c84 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -245,3 +245,16 @@ func CountPagesInUse() (pagesInUse, counted uintptr) {
 
 	return
 }
+
+// BlockOnSystemStack switches to the system stack, prints "x\n" to
+// stderr, and blocks in a stack containing
+// "runtime.blockOnSystemStackInternal".
+func BlockOnSystemStack() {
+	systemstack(blockOnSystemStackInternal)
+}
+
+func blockOnSystemStackInternal() {
+	print("x\n")
+	lock(&deadlock)
+	lock(&deadlock)
+}
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 40c0e85..5d0bf81 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -260,6 +260,12 @@ func check() {
 		throw("atomicor8")
 	}
 
+	m = [4]byte{0xff, 0xff, 0xff, 0xff}
+	atomic.And8(&m[1], 0x1)
+	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
+		throw("atomicand8")
+	}
+
 	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
 	if j == j {
 		throw("float64nan")
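
The new runtime self-check above verifies that atomic.And8 modifies only the addressed byte. Its expected result, written out with plain non-atomic Go purely for illustration:

    package main

    import "fmt"

    func main() {
        m := [4]byte{0xff, 0xff, 0xff, 0xff}
        m[1] &= 0x1 // non-atomic stand-in for atomic.And8(&m[1], 0x1)
        fmt.Printf("% x\n", m) // ff 01 ff ff: neighbouring bytes untouched
    }
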
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index 576a1fb..37318ff 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -171,6 +171,7 @@ func semrelease(addr *uint32) {
 			for x := root.head; x != nil; x = x.next {
 				if x.elem == unsafe.Pointer(addr) {
 					x.acquiretime = t0
+					break
 				}
 			}
 			mutexevent(t0-s.acquiretime, 3)
diff --git a/src/runtime/signal_sighandler.go b/src/runtime/signal_sighandler.go
index 5af12d7..758e42f 100644
--- a/src/runtime/signal_sighandler.go
+++ b/src/runtime/signal_sighandler.go
@@ -101,7 +101,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 		if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
 			// tracebackothers on original m skipped this one; trace it now.
 			goroutineheader(_g_.m.curg)
-			traceback(^uintptr(0), ^uintptr(0), 0, gp)
+			traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
 		} else if crashing == 0 {
 			tracebackothers(gp)
 			print("\n")
diff --git a/src/text/template/multi_test.go b/src/text/template/multi_test.go
index 8142f00..5d8c08f 100644
--- a/src/text/template/multi_test.go
+++ b/src/text/template/multi_test.go
@@ -363,7 +363,7 @@ func TestEmptyTemplate(t *testing.T) {
 		{[]string{"{{.}}", ""}, "twice", ""},
 	}
 
-	for _, c := range cases {
+	for i, c := range cases {
 		root := New("root")
 
 		var (
@@ -378,10 +378,43 @@ func TestEmptyTemplate(t *testing.T) {
 		}
 		buf := &bytes.Buffer{}
 		if err := m.Execute(buf, c.in); err != nil {
-			t.Fatal(err)
+			t.Error(i, err)
+			continue
 		}
 		if buf.String() != c.want {
 			t.Errorf("expected string %q: got %q", c.want, buf.String())
 		}
 	}
 }
+
+// Issue 19249 was a regression in 1.8 caused by the handling of empty
+// templates added in that release, which got different answers depending
+// on the order templates appeared in the internal map.
+func TestIssue19294(t *testing.T) {
+	// The empty block in "xhtml" should be replaced during execution
+	// by the contents of "stylesheet", but if the internal map associating
+	// names with templates is built in the wrong order, the empty block
+	// looks non-empty and this doesn't happen.
+	var inlined = map[string]string{
+		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
+		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
+	}
+	all := []string{"stylesheet", "xhtml"}
+	for i := 0; i < 100; i++ {
+		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, name := range all {
+			_, err := res.New(name).Parse(inlined[name])
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		var buf bytes.Buffer
+		res.Execute(&buf, 0)
+		if buf.String() != "stylesheet" {
+			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
+		}
+	}
+}
diff --git a/src/text/template/template.go b/src/text/template/template.go
index b6fceb1..3b4f34b 100644
--- a/src/text/template/template.go
+++ b/src/text/template/template.go
@@ -127,7 +127,7 @@ func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error
 	// Even if nt == t, we need to install it in the common.tmpl map.
 	if replace, err := t.associate(nt, tree); err != nil {
 		return nil, err
-	} else if replace {
+	} else if replace || nt.Tree == nil {
 		nt.Tree = tree
 	}
 	return nt, nil
@@ -215,7 +215,7 @@ func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
 	if new.common != t.common {
 		panic("internal error: associate not common")
 	}
-	if t.tmpl[new.name] != nil && parse.IsEmptyTree(tree.Root) && t.Tree != nil {
+	if old := t.tmpl[new.name]; old != nil && parse.IsEmptyTree(tree.Root) && old.Tree != nil {
 		// If a template by that name exists,
 		// don't replace it with an empty template.
 		return false, nil
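
Taken together, the AddParseTree and associate changes make the outcome independent of map order: a name first registered with an empty {{block}} body must end up bound to its real, non-empty definition. A condensed sketch of the scenario that TestIssue19294 above exercises (template names borrowed from that test):

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	root := template.New("root")
    	// The real definition of "stylesheet" ...
    	template.Must(root.New("stylesheet").Parse(`{{define "stylesheet"}}stylesheet{{end}}`))
    	// ... and a page whose {{block}} registers an *empty* "stylesheet"
    	// template; the empty tree must not displace the non-empty one.
    	template.Must(root.New("xhtml").Parse(`{{block "stylesheet" .}}{{end}}`))

    	// With the fix this reliably prints "stylesheet".
    	if err := root.ExecuteTemplate(os.Stdout, "xhtml", nil); err != nil {
    		panic(err)
    	}
    }
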
diff --git a/src/time/format_test.go b/src/time/format_test.go
index 219c2ca..d0013bc 100644
--- a/src/time/format_test.go
+++ b/src/time/format_test.go
@@ -245,27 +245,45 @@ func TestParseDayOutOfRange(t *testing.T) {
 	}
 }
 
+// TestParseInLocation checks that the Parse and ParseInLocation
+// functions do not get confused by the fact that AST (Arabia Standard
+// Time) and AST (Atlantic Standard Time) are different time zones,
+// even though they have the same abbreviation.
+//
+// ICANN has been slowly phasing out invented abbreviations in favor of
+// numeric time zones (for example, the Asia/Baghdad time zone
+// abbreviation got changed from AST to +03 in the 2017a tzdata
+// release); but we still want to make sure that the time package does
+// not get confused on systems with slightly older tzdata packages.
 func TestParseInLocation(t *testing.T) {
-	// Check that Parse (and ParseInLocation) understand that
-	// Feb 01 AST (Arabia Standard Time) and Feb 01 AST (Atlantic Standard Time)
-	// are in different time zones even though both are called AST
 
 	baghdad, err := LoadLocation("Asia/Baghdad")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	t1, err := ParseInLocation("Jan 02 2006 MST", "Feb 01 2013 AST", baghdad)
+	var t1, t2 Time
+
+	t1, err = ParseInLocation("Jan 02 2006 MST", "Feb 01 2013 AST", baghdad)
 	if err != nil {
 		t.Fatal(err)
 	}
-	t2 := Date(2013, February, 1, 00, 00, 00, 0, baghdad)
-	if t1 != t2 {
-		t.Fatalf("ParseInLocation(Feb 01 2013 AST, Baghdad) = %v, want %v", t1, t2)
-	}
+
 	_, offset := t1.Zone()
-	if offset != 3*60*60 {
-		t.Fatalf("ParseInLocation(Feb 01 2013 AST, Baghdad).Zone = _, %d, want _, %d", offset, 3*60*60)
+
+	// A zero offset means that ParseInLocation did not recognize the
+	// 'AST' abbreviation as matching the current location (Baghdad,
+	// where we'd expect a +03:00 offset), most likely because we're
+	// using a recent tzdata release (2017a or newer).
+	// If that happens, skip the Baghdad checks.
+	if offset != 0 {
+		t2 = Date(2013, February, 1, 00, 00, 00, 0, baghdad)
+		if t1 != t2 {
+			t.Fatalf("ParseInLocation(Feb 01 2013 AST, Baghdad) = %v, want %v", t1, t2)
+		}
+		if offset != 3*60*60 {
+			t.Fatalf("ParseInLocation(Feb 01 2013 AST, Baghdad).Zone = _, %d, want _, %d", offset, 3*60*60)
+		}
 	}
 
 	blancSablon, err := LoadLocation("America/Blanc-Sablon")
@@ -273,6 +291,9 @@ func TestParseInLocation(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	// In this case 'AST' means 'Atlantic Standard Time', and we
+	// expect the abbreviation to correctly match the American
+	// location.
 	t1, err = ParseInLocation("Jan 02 2006 MST", "Feb 01 2013 AST", blancSablon)
 	if err != nil {
 		t.Fatal(err)
diff --git a/test/fixedbugs/bug19403.go b/test/fixedbugs/bug19403.go
new file mode 100644
index 0000000..94c0fb4
--- /dev/null
+++ b/test/fixedbugs/bug19403.go
@@ -0,0 +1,134 @@
+// run
+
+// Copyright 2017 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test for golang.org/issue/19403.
+// F15 should not be clobbered by float-to-int conversion on ARM.
+// This test requires enough register-allocatable locals that the compiler may choose to use F15 for one of them.
+package main
+
+var count float32 = 16
+var i0 int
+var i1 int
+var i2 int
+var i3 int
+var i4 int
+var i5 int
+var i6 int
+var i7 int
+var i8 int
+var i9 int
+var i10 int
+var i11 int
+var i12 int
+var i13 int
+var i14 int
+var i15 int
+var i16 int
+
+func main() {
+	var f0 float32 = 0.0
+	var f1 float32 = 1.0
+	var f2 float32 = 2.0
+	var f3 float32 = 3.0
+	var f4 float32 = 4.0
+	var f5 float32 = 5.0
+	var f6 float32 = 6.0
+	var f7 float32 = 7.0
+	var f8 float32 = 8.0
+	var f9 float32 = 9.0
+	var f10 float32 = 10.0
+	var f11 float32 = 11.0
+	var f12 float32 = 12.0
+	var f13 float32 = 13.0
+	var f14 float32 = 14.0
+	var f15 float32 = 15.0
+	var f16 float32 = 16.0
+	i0 = int(f0)
+	i1 = int(f1)
+	i2 = int(f2)
+	i3 = int(f3)
+	i4 = int(f4)
+	i5 = int(f5)
+	i6 = int(f6)
+	i7 = int(f7)
+	i8 = int(f8)
+	i9 = int(f9)
+	i10 = int(f10)
+	i11 = int(f11)
+	i12 = int(f12)
+	i13 = int(f13)
+	i14 = int(f14)
+	i15 = int(f15)
+	i16 = int(f16)
+	if f16 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f15 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f14 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f13 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f12 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f11 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f10 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f9 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f8 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f7 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f6 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f5 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f4 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f3 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f2 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f1 != count {
+		panic("fail")
+	}
+	count -= 1
+	if f0 != count {
+		panic("fail")
+	}
+	count -= 1
+}
diff --git a/test/fixedbugs/issue19137.go b/test/fixedbugs/issue19137.go
new file mode 100644
index 0000000..946f029
--- /dev/null
+++ b/test/fixedbugs/issue19137.go
@@ -0,0 +1,35 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 19137: folding an address into a load/store causes
+// an odd offset on ARM64.
+
+package p
+
+type T struct {
+	p *int
+	a [2]byte
+	b [6]byte // not 4-byte aligned
+}
+
+func f(b [6]byte) T {
+	var x [1000]int // a large stack frame
+	_ = x
+	return T{b: b}
+}
+
+// An arg symbol's base address may not be at an aligned offset to
+// SP. Folding the arg's address into a load/store may cause an odd offset.
+func move(a, b [20]byte) [20]byte {
+	var x [1000]int // a large stack frame
+	_ = x
+	return b // b is not 8-byte aligned to SP
+}
+func zero() ([20]byte, [20]byte) {
+	var x [1000]int // a large stack frame
+	_ = x
+	return [20]byte{}, [20]byte{} // the second return value is not 8-byte aligned to SP
+}
diff --git a/test/fixedbugs/issue19168.go b/test/fixedbugs/issue19168.go
new file mode 100644
index 0000000..b94b1d0
--- /dev/null
+++ b/test/fixedbugs/issue19168.go
@@ -0,0 +1,58 @@
+// errorcheck -0 -l -d=wb
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+	"reflect"
+	"unsafe"
+
+	reflect2 "reflect"
+)
+
+func sink(e interface{})
+
+func a(hdr *reflect.SliceHeader, p *byte) {
+	hdr.Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+}
+
+func b(hdr *reflect.StringHeader, p *byte) {
+	hdr.Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+}
+
+func c(hdrs *[1]reflect.SliceHeader, p *byte) {
+	hdrs[0].Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+}
+
+func d(hdr *struct{ s reflect.StringHeader }, p *byte) {
+	hdr.s.Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+}
+
+func e(p *byte) (resHeap, resStack string) {
+	sink(&resHeap)
+
+	hdr := (*reflect.StringHeader)(unsafe.Pointer(&resHeap))
+	hdr.Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+
+	// No write barrier for non-escaping stack vars.
+	hdr = (*reflect.StringHeader)(unsafe.Pointer(&resStack))
+	hdr.Data = uintptr(unsafe.Pointer(p))
+
+	return
+}
+
+func f(hdr *reflect2.SliceHeader, p *byte) {
+	hdr.Data = uintptr(unsafe.Pointer(p)) // ERROR "write barrier"
+}
+
+type SliceHeader struct {
+	Data uintptr
+}
+
+func g(hdr *SliceHeader, p *byte) {
+	// No write barrier for lookalike SliceHeader.
+	hdr.Data = uintptr(unsafe.Pointer(p))
+}
diff --git a/test/fixedbugs/issue19182.go b/test/fixedbugs/issue19182.go
new file mode 100644
index 0000000..3a90ff4
--- /dev/null
+++ b/test/fixedbugs/issue19182.go
@@ -0,0 +1,36 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"sync/atomic"
+	"time"
+)
+
+var a uint64 = 0
+
+func main() {
+	runtime.GOMAXPROCS(2) // With just 1, the infinite loop never yields
+
+	go func() {
+		for {
+			atomic.AddUint64(&a, uint64(1))
+		}
+	}()
+
+	time.Sleep(10 * time.Millisecond) // A short sleep is enough in the passing case
+	i, val := 0, atomic.LoadUint64(&a)
+	for ; val == 0 && i < 100; val, i = atomic.LoadUint64(&a), i+1 {
+		time.Sleep(100 * time.Millisecond)
+	}
+	if val == 0 {
+		fmt.Printf("Failed to observe atomic increment after %d tries\n", i)
+	}
+
+}
diff --git a/test/fixedbugs/issue19201.go b/test/fixedbugs/issue19201.go
new file mode 100644
index 0000000..e370d55
--- /dev/null
+++ b/test/fixedbugs/issue19201.go
@@ -0,0 +1,52 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/binary"
+)
+
+var (
+	ch1 = make(chan int)
+	ch2 = make(chan int)
+
+	bin  = []byte("a\000\000\001")
+	want = binary.BigEndian.Uint32(bin)
+
+	c consumer = noopConsumer{}
+)
+
+type msg struct {
+	code uint32
+}
+
+type consumer interface {
+	consume(msg)
+}
+
+type noopConsumer struct{}
+
+func (noopConsumer) consume(msg) {}
+
+func init() {
+	close(ch1)
+}
+
+func main() {
+	var m msg
+	m.code = binary.BigEndian.Uint32(bin)
+
+	select {
+	case <-ch1:
+		c.consume(m)
+		if m.code != want {
+			// cannot use m.code here, or the bug is not triggered
+			panic("BigEndian read failed")
+		}
+	case <-ch2:
+	}
+}
diff --git a/test/fixedbugs/issue19217.go b/test/fixedbugs/issue19217.go
new file mode 100644
index 0000000..9679406
--- /dev/null
+++ b/test/fixedbugs/issue19217.go
@@ -0,0 +1,39 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package foo
+
+import (
+	"encoding/binary"
+)
+
+type DbBuilder struct {
+	arr []int
+}
+
+func (bld *DbBuilder) Finish() error {
+	defer bld.Finish()
+
+	var hash []byte
+	for _, ixw := range bld.arr {
+		for {
+			if ixw != 0 {
+				panic("ixw != 0")
+			}
+			ixw--
+		insertOne:
+			for {
+				for i := 0; i < 1; i++ {
+					if binary.LittleEndian.Uint16(hash[i:]) == 0 {
+						break insertOne
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/test/fixedbugs/issue19323.go b/test/fixedbugs/issue19323.go
new file mode 100644
index 0000000..f90af66
--- /dev/null
+++ b/test/fixedbugs/issue19323.go
@@ -0,0 +1,19 @@
+// errorcheck
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g() {}
+
+func f() {
+	g()[:] // ERROR "g.. used as value"
+}
+
+func g2() ([]byte, []byte) { return nil, nil }
+
+func f2() {
+	g2()[:] // ERROR "multiple-value g2.. in single-value context"
+}
diff --git a/test/fixedbugs/issue19743.go b/test/fixedbugs/issue19743.go
new file mode 100644
index 0000000..e57b19c
--- /dev/null
+++ b/test/fixedbugs/issue19743.go
@@ -0,0 +1,31 @@
+// errorcheck -0 -m -l
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package foo
+
+// Escape analysis needs to treat the uintptr-typed reflect.*Header fields as pointers.
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type immutableBytes []byte
+
+// The bug was a failure to leak param b.
+func toString(b immutableBytes) string { // ERROR "leaking param: b$"
+	var s string
+	if len(b) == 0 {
+		return s
+	}
+
+	strHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))         // ERROR "toString &s does not escape$"
+	strHeader.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data // ERROR "toString &b does not escape$"
+
+	l := len(b)
+	strHeader.Len = l
+	return s
+}
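
The leak annotation matters because the string returned by toString aliases b's backing array through the uintptr-typed Data field, so escape analysis must keep those bytes alive for as long as the string is reachable. A self-contained illustration of the same pattern (zeroCopyString is an illustrative name, not part of the patch):

    package main

    import (
    	"fmt"
    	"reflect"
    	"unsafe"
    )

    // zeroCopyString mirrors toString above: the result shares b's backing
    // array via the StringHeader.Data field, so the compiler must treat that
    // uintptr-typed field as a pointer when deciding what escapes.
    func zeroCopyString(b []byte) string {
    	var s string
    	if len(b) == 0 {
    		return s
    	}
    	strHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))
    	strHeader.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data
    	strHeader.Len = len(b)
    	return s
    }

    func main() {
    	b := []byte("hello, world")
    	s := zeroCopyString(b)
    	b = nil // only the string keeps the backing bytes reachable now
    	fmt.Println(s)
    }
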

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-golang/golang.git


