[Pkg-golang-commits] [golang] 02/03: Imported Upstream version 1.5.2

Tianon Gravi tianon at debian.org
Mon Dec 7 01:25:28 UTC 2015


This is an automated email from the git hooks/post-receive script.

tianon pushed a commit to branch debian-sid
in repository golang.

commit f09112129a21e99e3846eb80a788fd228f9f2e27
Author: Tianon Gravi <tianon at debian.org>
Date:   Sun Dec 6 17:09:31 2015 -0800

    Imported Upstream version 1.5.2
---
 VERSION                                      |   2 +-
 doc/conduct.html                             | 273 ++++++++++++++++++++
 doc/contrib.html                             |   8 +-
 doc/devel/release.html                       |  13 +
 doc/help.html                                |   6 +
 doc/install-source.html                      |   4 +-
 misc/cgo/test/cgo_test.go                    |   1 +
 misc/cgo/test/issue12030.go                  |  35 +++
 misc/cgo/test/issue1635.go                   |   5 +
 misc/cgo/test/setgid_linux.go                |  19 +-
 misc/ios/go_darwin_arm_exec.go               | 366 +++++++++++----------------
 src/cmd/cgo/gcc.go                           |   5 +
 src/cmd/compile/internal/gc/const.go         |  16 +-
 src/cmd/compile/internal/gc/order.go         |   5 +-
 src/cmd/compile/internal/gc/typecheck.go     |   9 +-
 src/cmd/compile/internal/ppc64/reg.go        |   2 +-
 src/cmd/dist/build.go                        |   1 +
 src/cmd/go/note.go                           |  72 +++++-
 src/cmd/go/note_test.go                      |  23 +-
 src/cmd/go/pkg.go                            |  27 +-
 src/cmd/internal/obj/ppc64/asm9.go           |   2 +-
 src/cmd/link/internal/amd64/asm.go           |   2 +-
 src/cmd/link/internal/ld/elf.go              |  32 ++-
 src/cmd/link/internal/ld/ldelf.go            |   3 +
 src/cmd/link/internal/ld/lib.go              |   4 +-
 src/cmd/link/internal/x86/asm.go             |   2 +-
 src/mime/multipart/multipart.go              |  16 +-
 src/mime/multipart/multipart_test.go         |  48 ++++
 src/net/interface_windows.go                 |   2 +-
 src/net/net_windows_test.go                  |  53 ++++
 src/runtime/asm_amd64.s                      |  27 +-
 src/runtime/asm_amd64p32.s                   |   9 +-
 src/runtime/cgo/cgo.go                       |   4 +-
 src/runtime/malloc.go                        |   7 +-
 src/runtime/mem_linux.go                     |  94 +++++--
 src/runtime/memclr_386.s                     |  11 +-
 src/runtime/memclr_amd64.s                   |   9 +-
 src/runtime/memclr_arm64.s                   |  29 ++-
 src/runtime/memclr_plan9_386.s               |  11 +-
 src/runtime/memclr_ppc64x.s                  |  17 +-
 src/runtime/memmove_386.s                    |  14 +-
 src/runtime/memmove_amd64.s                  |  10 +-
 src/runtime/memmove_arm64.s                  |  81 +++++-
 src/runtime/memmove_nacl_amd64p32.s          |  26 +-
 src/runtime/memmove_plan9_386.s              |  14 +-
 src/runtime/memmove_plan9_amd64.s            |  10 +-
 src/runtime/memmove_ppc64x.s                 |  78 ++++--
 src/runtime/memmove_test.go                  |  35 +++
 src/runtime/mgcmark.go                       |  71 +++++-
 src/runtime/os1_darwin.go                    |  17 +-
 src/runtime/os1_dragonfly.go                 |  16 +-
 src/runtime/os1_freebsd.go                   |  17 +-
 src/runtime/os1_linux.go                     |  20 +-
 src/runtime/os1_nacl.go                      |   9 +
 src/runtime/os1_netbsd.go                    |  18 +-
 src/runtime/os1_openbsd.go                   |  17 +-
 src/runtime/os1_plan9.go                     |   6 +
 src/runtime/os1_windows.go                   |  10 +
 src/runtime/os3_solaris.go                   |  18 +-
 src/runtime/os_solaris.go                    |  28 +-
 src/runtime/proc1.go                         |  56 ++--
 src/runtime/race/testdata/issue12225_test.go |  13 +
 src/runtime/rt0_darwin_arm.s                 |  29 ++-
 src/runtime/signal_linux.go                  |   4 +-
 src/runtime/stack1.go                        |   6 +
 src/runtime/sys_solaris_amd64.s              |   4 +
 src/runtime/traceback.go                     |  40 ++-
 test/fixedbugs/issue11987.go                 |  23 ++
 test/fixedbugs/issue12686.go                 |  16 ++
 test/fixedbugs/issue13160.go                 |  70 +++++
 70 files changed, 1619 insertions(+), 431 deletions(-)

diff --git a/VERSION b/VERSION
index 77af434..033d96c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-go1.5.1
\ No newline at end of file
+go1.5.2
\ No newline at end of file
diff --git a/doc/conduct.html b/doc/conduct.html
new file mode 100644
index 0000000..bfd2904
--- /dev/null
+++ b/doc/conduct.html
@@ -0,0 +1,273 @@
+<!--{
+	"Title": "Go Community Code of Conduct",
+	"Path":  "/conduct",
+	"Template": true
+}-->
+
+<style>
+ul {
+	max-width: 800px;
+}
+ul ul {
+	margin: 0 0 5px;
+}
+</style>
+
+<h2>About the Code of Conduct</h2>
+
+<h3>Why have a Code of Conduct?</h3>
+
+<p>
+Online communities include people from many different backgrounds.
+The Go contributors are committed to providing a friendly, safe and welcoming
+environment for all, regardless of age, disability, gender, nationality, race,
+religion, sexuality, or similar personal characteristic.
+</p>
+
+<p>
+The first goal of the Code of Conduct is to specify a baseline standard
+of behavior so that people with different social values and communication
+styles can talk about Go effectively, productively, and respectfully.
+</p>
+
+<p>
+The second goal is to provide a mechanism for resolving conflicts in the
+community when they arise.
+</p>
+
+<p>
+The third goal of the Code of Conduct is to make our community welcoming to
+people from different backgrounds.
+Diversity is critical to the project; for Go to be successful, it needs
+contributors and users from all backgrounds.
+(See <a href="https://blog.golang.org/open-source">Go, Open Source, Community</a>.)
+</p>
+
+<p>
+With that said, a healthy community must allow for disagreement and debate.
+The Code of Conduct is not a mechanism for people to silence others with whom
+they disagree.
+</p>
+
+<h3>Where does the Code of Conduct apply?</h3>
+
+<p>
+If you participate in or contribute to the Go ecosystem in any way,
+you are encouraged to follow the Code of Conduct while doing so.
+</p>
+
+<p>
+Explicit enforcement of the Code of Conduct applies to the
+official forums operated by the Go project (“Go spaces”):
+</p>
+
+<ul>
+    <li>The official <a href="https://github.com/golang/">GitHub projects</a>
+        and <a href="https://go-review.googlesource.com/">code reviews</a>.
+    <li>The <a href="https://groups.google.com/group/golang-nuts">golang-nuts</a> and
+        <a href="https://groups.google.com/group/golang-dev">golang-dev</a> mailing lists.
+<li>The #go-nuts IRC channel on Freenode.
+<li>The <a href="https://reddit.com/r/golang">/r/golang subreddit</a>.
+</ul>
+
+<p>
+Other Go groups (such as conferences, meetups, and other unofficial forums) are
+encouraged to adopt this Code of Conduct. Those groups must provide their own
+moderators and/or working group (see below).
+</p>
+
+<h2>Gopher values</h2>
+
+<p>
+These are the values to which people in the Go community (“Gophers”) should aspire.
+</p>
+
+<ul>
+<li>Be friendly and welcoming
+<li>Be patient
+    <ul>
+    <li>Remember that people have varying communication styles and that not
+        everyone is using their native language.
+        (Meaning and tone can be lost in translation.)
+    </ul>
+<li>Be thoughtful
+    <ul>
+    <li>Productive communication requires effort.
+        Think about how your words will be interpreted.
+    <li>Remember that sometimes it is best to refrain entirely from commenting.
+    </ul>
+<li>Be respectful
+    <ul>
+    <li>In particular, respect differences of opinion.
+    </ul>
+<li>Be charitable
+    <ul>
+    <li>Interpret the arguments of others in good faith, do not seek to disagree.
+    <li>When we do disagree, try to understand why.
+    </ul>
+<li>Avoid destructive behavior:
+    <ul>
+    <li>Derailing: stay on topic; if you want to talk about something else,
+        start a new conversation.
+    <li>Unconstructive criticism: don't merely decry the current state of affairs;
+        offer—or at least solicit—suggestions as to how things may be improved.
+    <li>Snarking (pithy, unproductive, sniping comments)
+    <li>Discussing potentially offensive or sensitive issues;
+        this all too often leads to unnecessary conflict.
+    <li>Microaggressions: brief and commonplace verbal, behavioral and
+        environmental indignities that communicate hostile, derogatory or negative
+        slights and insults to a person or group.
+    </ul>
+</ul>
+
+<p>
+People are complicated.
+You should expect to be misunderstood and to misunderstand others;
+when this inevitably occurs, resist the urge to be defensive or assign blame.
+Try not to take offense where no offense was intended.
+Give people the benefit of the doubt.
+Even if the intent was to provoke, do not rise to it.
+It is the responsibility of <i>all parties</i> to de-escalate conflict when it arises.
+</p>
+
+<h2>Unwelcome behavior</h2>
+
+<p>
+These actions are explicitly forbidden in Go spaces:
+</p>
+
+<ul>
+<li>Insulting, demeaning, hateful, or threatening remarks.
+<li>Discrimination based on age, disability, gender, nationality, race,
+    religion, sexuality, or similar personal characteristic.
+<li>Bullying or systematic harassment.
+<li>Unwelcome sexual advances.
+<li>Incitement to any of these.
+</ul>
+
+<h2>Moderation</h2>
+
+<p>
+The Go spaces are not free speech venues; they are for discussion about Go.
+These spaces have moderators.
+The goal of the moderators is to facilitate civil discussion about Go.
+</p>
+
+<p>
+When using the official Go spaces you should act in the spirit of the “Gopher
+values”.
+If you conduct yourself in a way that is explicitly forbidden by the CoC,
+you will be warned and asked to stop.
+If you do not stop, you will be removed from our community spaces temporarily.
+Repeated, wilful breaches of the CoC will result in a permanent ban.
+</p>
+
+<p>
+Moderators are held to a higher standard than other community members.
+If a moderator creates an inappropriate situation, they should expect less
+leeway than others, and should expect to be removed from their position if they
+cannot adhere to the CoC.
+</p>
+
+<p>
+Complaints about moderator actions must be handled using the reporting process
+below.
+</p>
+
+<h2>Reporting issues</h2>
+
+<p>
+The Code of Conduct Working Group is a group of people that represent the Go
+community. They are responsible for handling conduct-related issues.
+Their purpose is to de-escalate conflicts and try to resolve issues to the
+satisfaction of all parties. They are:
+</p>
+
+<ul>
+	<li>Aditya Mukerjee <dev at chimeracoder.net>
+	<li>Andrew Gerrand <adg at golang.org>
+	<li>Dave Cheney <dave at cheney.net>
+	<li>Jason Buberel <jbuberel at google.com>
+	<li>Peggy Li <peggyli.224 at gmail.com>
+	<li>Sarah Adams <sadams.codes at gmail.com>
+	<li>Steve Francia <steve.francia at gmail.com>
+	<li>Verónica López <gveronicalg at gmail.com>
+</ul>
+
+<p>
+If you encounter a conduct-related issue, you should report it to the
+Working Group using the process described below.
+<b>Do not</b> post about the issue publicly or try to rally sentiment against a
+particular individual or group.
+</p>
+
+<ul>
+<li>Mail <a href="mailto:conduct at golang.org">conduct at golang.org</a> or
+    <a href="https://golang.org/s/conduct-report">submit an anonymous report</a>.
+    <ul>
+    <li>Your message will reach the Working Group.
+    <li>Reports are confidential within the Working Group.
+    <li>Should you choose to remain anonymous then the Working Group cannot
+        notify you of the outcome of your report.
+    <li>You may contact a member of the group directly if you do not feel
+        comfortable contacting the group as a whole. That member will then raise
+        the issue with the Working Group as a whole, preserving the privacy of the
+        reporter (if desired).
+    <li>If your report concerns a member of the Working Group they will be recused
+        from Working Group discussions of the report.
+    <li>The Working Group will strive to handle reports with discretion and
+        sensitivity, to protect the privacy of the involved parties,
+        and to avoid conflicts of interest.
+    </ul>
+<li>You should receive a response within 48 hours (likely sooner).
+    (Should you choose to contact a single Working Group member,
+    it may take longer to receive a response.)
+<li>The Working Group will meet to review the incident and determine what happened.
+    <ul>
+    <li>With the permission of person reporting the incident, the Working Group
+        may reach out to other community members for more context.
+    </ul>
+<li>The Working Group will reach a decision as to how to act. These may include:
+    <ul>
+    <li>Nothing.
+    <li>A request for a private or public apology.
+    <li>A private or public warning.
+    <li>An imposed vacation (for instance, asking someone to abstain for a week
+        from a mailing list or IRC).
+    <li>A permanent or temporary ban from some or all Go spaces.
+    </ul>
+<li>The Working Group will reach out to the original reporter to let them know
+    the decision.
+<li>Appeals to the decision may be made to the Working Group,
+    or to any of its members directly.
+</ul>
+
+<p>
+<b>Note that the goal of the Code of Conduct and the Working Group is to resolve
+conflicts in the most harmonious way possible.</b>
+We hope that in most cases issues may be resolved through polite discussion and
+mutual agreement.
+Bannings and other forceful measures are to be employed only as a last resort.
+</p>
+
+<p>
+Changes to the Code of Conduct (including to the members of the Working Group)
+should be proposed using the
+<a href="https://golang.org/s/proposal-process">change proposal process</a>.
+</p>
+
+<h2>Summary</h2>
+
+<ul>
+<li>Treat everyone with respect and kindness.
+<li>Be thoughtful in how you communicate.
+<li>Don’t be destructive or inflammatory.
+<li>If you encounter an issue, please mail <a href="mailto:conduct at golang.org">conduct at golang.org</a>.
+</ul>
+
+<h3>Acknowledgements</h3>
+
+<p>
+Parts of this document were derived from the Code of Conduct documents of the
+Django, FreeBSD, and Rust projects.
+</p>
diff --git a/doc/contrib.html b/doc/contrib.html
index f5f6f68..66e5af9 100644
--- a/doc/contrib.html
+++ b/doc/contrib.html
@@ -94,10 +94,16 @@ We pride ourselves on being meticulous; no issue is too small.
 
 <p>
 Security-related issues should be reported to
-<a href="mailto:security at golang.org">security at golang.org</a>.
+<a href="mailto:security at golang.org">security at golang.org</a>.<br>
 See the <a href="/security">security policy</a> for more details.
 </p>
 
+<p>
+Community-related issues should be reported to
+<a href="mailto:conduct at golang.org">conduct at golang.org</a>.<br>
+See the <a href="/conduct">Code of Conduct</a> for more details.
+</p>
+
 <h3><a href="/doc/contribute.html">Contributing code</a></h3>
 
 <p>
diff --git a/doc/devel/release.html b/doc/devel/release.html
index 43ead08..56da4d0 100644
--- a/doc/devel/release.html
+++ b/doc/devel/release.html
@@ -22,6 +22,14 @@ Read the <a href="/doc/go1.5">Go 1.5 Release Notes</a> for more information.
 <h3 id="go1.5.minor">Minor revisions</h3>
 
 <p>
+go1.5.2 (released 2015/12/02) includes bug fixes to the compiler, linker, and
+the <code>mime/multipart</code>, <code>net</code>, and <code>runtime</code>
+packages.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.2">Go
+1.5.2 milestone</a> on our issue tracker for details.
+</p>
+
+<p>
 go1.5.1 (released 2015/09/08) includes bug fixes to the compiler, assembler, and
 the <code>fmt</code>, <code>net/textproto</code>, <code>net/http</code>, and
 <code>runtime</code> packages.
@@ -48,6 +56,11 @@ go1.4.2 (released 2015/02/17) includes bug fixes to the <code>go</code> command,
 See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.2">Go 1.4.2 milestone on our issue tracker</a> for details.
 </p>
 
+<p>
+go1.4.3 (released 2015/09/22) includes security fixes to the <code>net/http</code> package and bug fixes to the <code>runtime</code> package.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.3">Go 1.4.3 milestone on our issue tracker</a> for details.
+</p>
+
 <h2 id="go1.3">go1.3 (released 2014/06/18)</h2>
 
 <p>
diff --git a/doc/help.html b/doc/help.html
index 2cc4780..979d7bc 100644
--- a/doc/help.html
+++ b/doc/help.html
@@ -48,3 +48,9 @@ Each month in places around the world, groups of Go programmers ("gophers")
 meet to talk about Go. Find a chapter near you.
 </p>
 
+<h3 id="conduct"><a href="/conduct">Code of Conduct</a></h3>
+<p>
+Guidelines for participating in Go community spaces
+and a reporting process for handling issues.
+</p>
+
diff --git a/doc/install-source.html b/doc/install-source.html
index e71fff7..bffaa27 100644
--- a/doc/install-source.html
+++ b/doc/install-source.html
@@ -167,7 +167,7 @@ Then clone the repository and check out the latest release tag:</p>
 <pre>
 $ git clone https://go.googlesource.com/go
 $ cd go
-$ git checkout go1.5
+$ git checkout go1.5.2
 </pre>
 
 <h2 id="head">(Optional) Switch to the master branch</h2>
@@ -346,7 +346,7 @@ New releases are announced on the
 <a href="//groups.google.com/group/golang-announce">golang-announce</a>
 mailing list.
 Each announcement mentions the latest release tag, for instance,
-<code>go1.5</code>.
+<code>go1.5.2</code>.
 </p>
 
 <p>
diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go
index 9af31e8..4060338 100644
--- a/misc/cgo/test/cgo_test.go
+++ b/misc/cgo/test/cgo_test.go
@@ -65,5 +65,6 @@ func Test9026(t *testing.T)                  { test9026(t) }
 func Test9557(t *testing.T)                  { test9557(t) }
 func Test10303(t *testing.T)                 { test10303(t, 10) }
 func Test11925(t *testing.T)                 { test11925(t) }
+func Test12030(t *testing.T)                 { test12030(t) }
 
 func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
diff --git a/misc/cgo/test/issue12030.go b/misc/cgo/test/issue12030.go
new file mode 100644
index 0000000..ef8f86f
--- /dev/null
+++ b/misc/cgo/test/issue12030.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 12030. sprintf is defined in both ntdll and msvcrt,
+// Normally we want the one in the msvcrt.
+
+package cgotest
+
+/*
+#include <stdio.h>
+#include <stdlib.h>
+void issue12030conv(char *buf, double x) {
+	sprintf(buf, "d=%g", x);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"testing"
+	"unsafe"
+)
+
+func test12030(t *testing.T) {
+	buf := (*C.char)(C.malloc(256))
+	defer C.free(unsafe.Pointer(buf))
+	for _, f := range []float64{1.0, 2.0, 3.14} {
+		C.issue12030conv(buf, C.double(f))
+		got := C.GoString(buf)
+		if want := fmt.Sprintf("d=%g", f); got != want {
+			t.Fatalf("C.sprintf failed for %g: %q != %q", f, got, want)
+		}
+	}
+}
diff --git a/misc/cgo/test/issue1635.go b/misc/cgo/test/issue1635.go
index 6bfe110..cc4be90 100644
--- a/misc/cgo/test/issue1635.go
+++ b/misc/cgo/test/issue1635.go
@@ -14,6 +14,11 @@ void scatter() {
 	printf("scatter = %p\n", p);
 }
 
+// Adding this explicit extern declaration makes this a test for
+// https://gcc.gnu.org/PR68072 aka https://golang.org/issue/13344 .
+// It used to cause a cgo error when building with GCC 6.
+extern int hola;
+
 // this example is in issue 3253
 int hola = 0;
 int testHola() { return hola; }
diff --git a/misc/cgo/test/setgid_linux.go b/misc/cgo/test/setgid_linux.go
index 197f01f..ca95e08 100644
--- a/misc/cgo/test/setgid_linux.go
+++ b/misc/cgo/test/setgid_linux.go
@@ -14,11 +14,14 @@ package cgotest
 import "C"
 
 import (
+	"os"
+	"os/signal"
+	"syscall"
 	"testing"
 	"time"
 )
 
-func testSetgid(t *testing.T) {
+func runTestSetgid() bool {
 	c := make(chan bool)
 	go func() {
 		C.setgid(0)
@@ -26,7 +29,21 @@ func testSetgid(t *testing.T) {
 	}()
 	select {
 	case <-c:
+		return true
 	case <-time.After(5 * time.Second):
+		return false
+	}
+
+}
+
+func testSetgid(t *testing.T) {
+	if !runTestSetgid() {
 		t.Error("setgid hung")
 	}
+
+	// Now try it again after using signal.Notify.
+	signal.Notify(make(chan os.Signal, 1), syscall.SIGINT)
+	if !runTestSetgid() {
+		t.Error("setgid hung after signal.Notify")
+	}
 }
diff --git a/misc/ios/go_darwin_arm_exec.go b/misc/ios/go_darwin_arm_exec.go
index debd2cd..3131b15 100644
--- a/misc/ios/go_darwin_arm_exec.go
+++ b/misc/ios/go_darwin_arm_exec.go
@@ -160,9 +160,6 @@ func run(bin string, args []string) (err error) {
 	}
 	defer os.Chdir(oldwd)
 
-	type waitPanic struct {
-		err error
-	}
 	defer func() {
 		if r := recover(); r != nil {
 			if w, ok := r.(waitPanic); ok {
@@ -174,14 +171,96 @@ func run(bin string, args []string) (err error) {
 	}()
 
 	defer exec.Command("killall", "ios-deploy").Run() // cleanup
-
 	exec.Command("killall", "ios-deploy").Run()
 
 	var opts options
 	opts, args = parseArgs(args)
 
 	// ios-deploy invokes lldb to give us a shell session with the app.
-	cmd = exec.Command(
+	s, err := newSession(appdir, args, opts)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		b := s.out.Bytes()
+		if err == nil && !debug {
+			i := bytes.Index(b, []byte("(lldb) process continue"))
+			if i > 0 {
+				b = b[i:]
+			}
+		}
+		os.Stdout.Write(b)
+	}()
+
+	// Script LLDB. Oh dear.
+	s.do(`process handle SIGHUP  --stop false --pass true --notify false`)
+	s.do(`process handle SIGPIPE --stop false --pass true --notify false`)
+	s.do(`process handle SIGUSR1 --stop false --pass true --notify false`)
+	s.do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
+	s.do(`process handle SIGBUS  --stop false --pass true --notify false`) // does not work
+
+	if opts.lldb {
+		_, err := io.Copy(s.in, os.Stdin)
+		if err != io.EOF {
+			return err
+		}
+		return nil
+	}
+
+	s.do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
+
+	s.doCmd("run", "stop reason = breakpoint", 20*time.Second)
+
+	// Move the current working directory into the faux gopath.
+	if pkgpath != "src" {
+		s.do(`breakpoint delete 1`)
+		s.do(`expr char* $mem = (char*)malloc(512)`)
+		s.do(`expr $mem = (char*)getwd($mem, 512)`)
+		s.do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
+		s.do(`call (void)chdir($mem)`)
+	}
+
+	startTestsLen := s.out.Len()
+	fmt.Fprintln(s.in, `process continue`)
+
+	passed := func(out *buf) bool {
+		// Just to make things fun, lldb sometimes translates \n into \r\n.
+		return s.out.LastIndex([]byte("\nPASS\n")) > startTestsLen ||
+			s.out.LastIndex([]byte("\nPASS\r")) > startTestsLen ||
+			s.out.LastIndex([]byte("\n(lldb) PASS\n")) > startTestsLen ||
+			s.out.LastIndex([]byte("\n(lldb) PASS\r")) > startTestsLen
+	}
+	err = s.wait("test completion", passed, opts.timeout)
+	if passed(s.out) {
+		// The returned lldb error code is usually non-zero.
+		// We check for test success by scanning for the final
+		// PASS returned by the test harness, assuming the worst
+		// in its absence.
+		return nil
+	}
+	return err
+}
+
+type lldbSession struct {
+	cmd      *exec.Cmd
+	in       *os.File
+	out      *buf
+	timedout chan struct{}
+	exited   chan error
+}
+
+func newSession(appdir string, args []string, opts options) (*lldbSession, error) {
+	lldbr, in, err := os.Pipe()
+	if err != nil {
+		return nil, err
+	}
+	s := &lldbSession{
+		in:     in,
+		out:    new(buf),
+		exited: make(chan error),
+	}
+
+	s.cmd = exec.Command(
 		// lldb tries to be clever with terminals.
 		// So we wrap it in script(1) and be clever
 		// right back at it.
@@ -198,267 +277,120 @@ func run(bin string, args []string) (err error) {
 		"--bundle", appdir,
 	)
 	if debug {
-		log.Println(strings.Join(cmd.Args, " "))
+		log.Println(strings.Join(s.cmd.Args, " "))
 	}
 
-	lldbr, lldb, err := os.Pipe()
-	if err != nil {
-		return err
-	}
-	w := new(bufWriter)
+	var out io.Writer = s.out
 	if opts.lldb {
-		mw := io.MultiWriter(w, os.Stderr)
-		cmd.Stdout = mw
-		cmd.Stderr = mw
-	} else {
-		cmd.Stdout = w
-		cmd.Stderr = w // everything of interest is on stderr
+		out = io.MultiWriter(out, os.Stderr)
 	}
-	cmd.Stdin = lldbr
+	s.cmd.Stdout = out
+	s.cmd.Stderr = out // everything of interest is on stderr
+	s.cmd.Stdin = lldbr
 
-	if err := cmd.Start(); err != nil {
-		return fmt.Errorf("ios-deploy failed to start: %v", err)
+	if err := s.cmd.Start(); err != nil {
+		return nil, fmt.Errorf("ios-deploy failed to start: %v", err)
 	}
 
 	// Manage the -test.timeout here, outside of the test. There is a lot
 	// of moving parts in an iOS test harness (notably lldb) that can
 	// swallow useful stdio or cause its own ruckus.
-	var timedout chan struct{}
 	if opts.timeout > 1*time.Second {
-		timedout = make(chan struct{})
+		s.timedout = make(chan struct{})
 		time.AfterFunc(opts.timeout-1*time.Second, func() {
-			close(timedout)
+			close(s.timedout)
 		})
 	}
 
-	exited := make(chan error)
 	go func() {
-		exited <- cmd.Wait()
+		s.exited <- s.cmd.Wait()
 	}()
 
-	waitFor := func(stage, str string, timeout time.Duration) error {
-		select {
-		case <-timedout:
-			w.printBuf()
-			if p := cmd.Process; p != nil {
-				p.Kill()
-			}
-			return fmt.Errorf("timeout (stage %s)", stage)
-		case err := <-exited:
-			w.printBuf()
-			return fmt.Errorf("failed (stage %s): %v", stage, err)
-		case i := <-w.find(str, timeout):
-			if i < 0 {
-				log.Printf("timed out on stage %q, retrying", stage)
-				return errRetry
-			}
-			w.clearTo(i + len(str))
-			return nil
-		}
+	cond := func(out *buf) bool {
+		i0 := s.out.LastIndex([]byte("(lldb)"))
+		i1 := s.out.LastIndex([]byte("fruitstrap"))
+		i2 := s.out.LastIndex([]byte(" connect"))
+		return i0 > 0 && i1 > 0 && i2 > 0
 	}
-	do := func(cmd string) {
-		fmt.Fprintln(lldb, cmd)
-		if err := waitFor(fmt.Sprintf("prompt after %q", cmd), "(lldb)", 0); err != nil {
-			panic(waitPanic{err})
-		}
+	if err := s.wait("lldb start", cond, 5*time.Second); err != nil {
+		fmt.Printf("lldb start error: %v\n", err)
+		return nil, errRetry
 	}
+	return s, nil
+}
 
-	// Wait for installation and connection.
-	if err := waitFor("ios-deploy before run", "(lldb)", 0); err != nil {
-		// Retry if we see a rare and longstanding ios-deploy bug.
-		// https://github.com/phonegap/ios-deploy/issues/11
-		//	Assertion failed: (AMDeviceStartService(device, CFSTR("com.apple.debugserver"), &gdbfd, NULL) == 0)
-		log.Printf("%v, retrying", err)
-		return errRetry
-	}
+func (s *lldbSession) do(cmd string) { s.doCmd(cmd, "(lldb)", 0) }
 
-	// Script LLDB. Oh dear.
-	do(`process handle SIGHUP  --stop false --pass true --notify false`)
-	do(`process handle SIGPIPE --stop false --pass true --notify false`)
-	do(`process handle SIGUSR1 --stop false --pass true --notify false`)
-	do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
-	do(`process handle SIGBUS  --stop false --pass true --notify false`) // does not work
-
-	if opts.lldb {
-		_, err := io.Copy(lldb, os.Stdin)
-		if err != io.EOF {
-			return err
-		}
-		return nil
+func (s *lldbSession) doCmd(cmd string, waitFor string, extraTimeout time.Duration) {
+	startLen := s.out.Len()
+	fmt.Fprintln(s.in, cmd)
+	cond := func(out *buf) bool {
+		i := s.out.LastIndex([]byte(waitFor))
+		return i > startLen
 	}
-
-	do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
-
-	fmt.Fprintln(lldb, `run`)
-	if err := waitFor("br getwd", "stop reason = breakpoint", 20*time.Second); err != nil {
-		// At this point we see several flaky errors from the iOS
-		// build infrastructure. The most common is never reaching
-		// the breakpoint, which we catch with a timeout. Very
-		// occasionally lldb can produce errors like:
-		//
-		//	Breakpoint 1: no locations (pending).
-		//	WARNING:  Unable to resolve breakpoint to any actual locations.
-		//
-		// As no actual test code has been executed by this point,
-		// we treat all errors as recoverable.
-		if err != errRetry {
-			log.Printf("%v, retrying", err)
-			err = errRetry
-		}
-		return err
-	}
-	if err := waitFor("br getwd prompt", "(lldb)", 0); err != nil {
-		return err
+	if err := s.wait(fmt.Sprintf("running cmd %q", cmd), cond, extraTimeout); err != nil {
+		panic(waitPanic{err})
 	}
+}
 
-	// Move the current working directory into the faux gopath.
-	if pkgpath != "src" {
-		do(`breakpoint delete 1`)
-		do(`expr char* $mem = (char*)malloc(512)`)
-		do(`expr $mem = (char*)getwd($mem, 512)`)
-		do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
-		do(`call (void)chdir($mem)`)
-	}
-
-	// Run the tests.
-	w.trimSuffix("(lldb) ")
-	fmt.Fprintln(lldb, `process continue`)
-
-	// Wait for the test to complete.
-	select {
-	case <-timedout:
-		w.printBuf()
-		if p := cmd.Process; p != nil {
-			p.Kill()
-		}
-		return errors.New("timeout running tests")
-	case <-w.find("\nPASS", 0):
-		passed := w.isPass()
-		w.printBuf()
-		if passed {
-			return nil
-		}
-		return errors.New("test failure")
-	case err := <-exited:
-		// The returned lldb error code is usually non-zero.
-		// We check for test success by scanning for the final
-		// PASS returned by the test harness, assuming the worst
-		// in its absence.
-		if w.isPass() {
-			err = nil
-		} else if err == nil {
-			err = errors.New("test failure")
+func (s *lldbSession) wait(reason string, cond func(out *buf) bool, extraTimeout time.Duration) error {
+	doTimeout := 1*time.Second + extraTimeout
+	doTimedout := time.After(doTimeout)
+	for {
+		select {
+		case <-s.timedout:
+			if p := s.cmd.Process; p != nil {
+				p.Kill()
+			}
+			return fmt.Errorf("test timeout (%s)", reason)
+		case <-doTimedout:
+			return fmt.Errorf("command timeout (%s for %v)", reason, doTimeout)
+		case err := <-s.exited:
+			return fmt.Errorf("exited (%s: %v)", reason, err)
+		default:
+			if cond(s.out) {
+				return nil
+			}
+			time.Sleep(20 * time.Millisecond)
 		}
-		w.printBuf()
-		return err
 	}
 }
 
-type bufWriter struct {
-	mu     sync.Mutex
-	buf    []byte
-	suffix []byte // remove from each Write
-
-	findTxt   []byte   // search buffer on each Write
-	findCh    chan int // report find position
-	findAfter *time.Timer
+type buf struct {
+	mu  sync.Mutex
+	buf []byte
 }
 
-func (w *bufWriter) Write(in []byte) (n int, err error) {
+func (w *buf) Write(in []byte) (n int, err error) {
 	w.mu.Lock()
 	defer w.mu.Unlock()
-
-	n = len(in)
-	in = bytes.TrimSuffix(in, w.suffix)
-
-	if debug {
-		inTxt := strings.Replace(string(in), "\n", "\\n", -1)
-		findTxt := strings.Replace(string(w.findTxt), "\n", "\\n", -1)
-		fmt.Printf("debug --> %s <-- debug (findTxt='%s')\n", inTxt, findTxt)
-	}
-
 	w.buf = append(w.buf, in...)
-
-	if len(w.findTxt) > 0 {
-		if i := bytes.Index(w.buf, w.findTxt); i >= 0 {
-			w.findCh <- i
-			close(w.findCh)
-			w.findTxt = nil
-			w.findCh = nil
-			if w.findAfter != nil {
-				w.findAfter.Stop()
-				w.findAfter = nil
-			}
-		}
-	}
-	return n, nil
+	return len(in), nil
 }
 
-func (w *bufWriter) trimSuffix(p string) {
+func (w *buf) LastIndex(sep []byte) int {
 	w.mu.Lock()
 	defer w.mu.Unlock()
-	w.suffix = []byte(p)
+	return bytes.LastIndex(w.buf, sep)
 }
 
-func (w *bufWriter) printBuf() {
+func (w *buf) Bytes() []byte {
 	w.mu.Lock()
 	defer w.mu.Unlock()
-	fmt.Fprintf(os.Stderr, "%s", w.buf)
-	w.buf = nil
-}
 
-func (w *bufWriter) clearTo(i int) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	w.buf = w.buf[i:]
+	b := make([]byte, len(w.buf))
+	copy(b, w.buf)
+	return b
 }
 
-// find returns a channel that will have exactly one byte index sent
-// to it when the text str appears in the buffer. If the text does not
-// appear before timeout, -1 is sent.
-//
-// A timeout of zero means no timeout.
-func (w *bufWriter) find(str string, timeout time.Duration) <-chan int {
+func (w *buf) Len() int {
 	w.mu.Lock()
 	defer w.mu.Unlock()
-	if len(w.findTxt) > 0 {
-		panic(fmt.Sprintf("find(%s): already trying to find %s", str, w.findTxt))
-	}
-	txt := []byte(str)
-	ch := make(chan int, 1)
-	if i := bytes.Index(w.buf, txt); i >= 0 {
-		ch <- i
-		close(ch)
-	} else {
-		w.findTxt = txt
-		w.findCh = ch
-		if timeout > 0 {
-			w.findAfter = time.AfterFunc(timeout, func() {
-				w.mu.Lock()
-				defer w.mu.Unlock()
-				if w.findCh == ch {
-					w.findTxt = nil
-					w.findCh = nil
-					w.findAfter = nil
-					ch <- -1
-					close(ch)
-				}
-			})
-		}
-	}
-	return ch
+	return len(w.buf)
 }
 
-func (w *bufWriter) isPass() bool {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	// The final stdio of lldb is non-deterministic, so we
-	// scan the whole buffer.
-	//
-	// Just to make things fun, lldb sometimes translates \n
-	// into \r\n.
-	return bytes.Contains(w.buf, []byte("\nPASS\n")) || bytes.Contains(w.buf, []byte("\nPASS\r"))
+type waitPanic struct {
+	err error
 }
 
 type options struct {
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index b64849a..e0b89ec 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -490,6 +490,11 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
 			name, _ := e.Val(dwarf.AttrName).(string)
 			typOff, _ := e.Val(dwarf.AttrType).(dwarf.Offset)
 			if name == "" || typOff == 0 {
+				if e.Val(dwarf.AttrSpecification) != nil {
+					// Since we are reading all the DWARF,
+					// assume we will see the variable elsewhere.
+					break
+				}
 				fatalf("malformed DWARF TagVariable entry")
 			}
 			if !strings.HasPrefix(name, "__cgo__") {
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 9eb4983..5095e5e 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -1279,20 +1279,28 @@ func defaultlit(np **Node, t *Type) {
 	return
 
 num:
+	// Note: n.Val().Ctype() can be CTxxx (not a constant) here
+	// in the case of an untyped non-constant value, like 1<<i.
+	v1 := n.Val()
 	if t != nil {
 		if Isint[t.Etype] {
 			t1 = t
-			n.SetVal(toint(n.Val()))
+			v1 = toint(n.Val())
 		} else if Isfloat[t.Etype] {
 			t1 = t
-			n.SetVal(toflt(n.Val()))
+			v1 = toflt(n.Val())
 		} else if Iscomplex[t.Etype] {
 			t1 = t
-			n.SetVal(tocplx(n.Val()))
+			v1 = tocplx(n.Val())
+		}
+		if n.Val().Ctype() != CTxxx {
+			n.SetVal(v1)
 		}
 	}
 
-	overflow(n.Val(), t1)
+	if n.Val().Ctype() != CTxxx {
+		overflow(n.Val(), t1)
+	}
 	Convlit(np, t1)
 	lineno = int32(lno)
 	return
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 799a17e..a5cf920 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -1091,7 +1091,10 @@ func orderexpr(np **Node, order *Order, lhs *Node) {
 		OMAKESLICE,
 		ONEW,
 		OREAL,
-		ORECOVER:
+		ORECOVER,
+		OSTRARRAYBYTE,
+		OSTRARRAYBYTETMP,
+		OSTRARRAYRUNE:
 		ordercall(n, order)
 		if lhs == nil || lhs.Op != ONAME || flag_race != 0 {
 			n = ordercopyexpr(n, n.Type, order, 0)
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index befe3b2..e5ae967 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -2874,12 +2874,17 @@ func keydup(n *Node, hash map[uint32][]*Node) {
 			if Eqtype(a.Left.Type, n.Type) {
 				cmp.Right = a.Left
 				evconst(&cmp)
-				b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
+				if cmp.Op == OLITERAL {
+					// Sometimes evconst fails.  See issue 12536.
+					b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
+				}
 			}
 		} else if Eqtype(a.Type, n.Type) {
 			cmp.Right = a
 			evconst(&cmp)
-			b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
+			if cmp.Op == OLITERAL {
+				b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
+			}
 		}
 
 		if b != 0 {
diff --git a/src/cmd/compile/internal/ppc64/reg.go b/src/cmd/compile/internal/ppc64/reg.go
index fa1cb71..a301836 100644
--- a/src/cmd/compile/internal/ppc64/reg.go
+++ b/src/cmd/compile/internal/ppc64/reg.go
@@ -111,7 +111,7 @@ func regnames(n *int) []string {
 
 func excludedregs() uint64 {
 	// Exclude registers with fixed functions
-	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))
 
 	// Also exclude floating point registers with fixed constants
 	regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 184f973..1658e16 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -894,6 +894,7 @@ var buildorder = []string{
 	"crypto/sha1",
 	"debug/dwarf",
 	"debug/elf",
+	"debug/macho",
 	"cmd/go",
 }
 
diff --git a/src/cmd/go/note.go b/src/cmd/go/note.go
index 97e1865..c7346a5 100644
--- a/src/cmd/go/note.go
+++ b/src/cmd/go/note.go
@@ -7,6 +7,7 @@ package main
 import (
 	"bytes"
 	"debug/elf"
+	"debug/macho"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -69,11 +70,11 @@ func readELFNote(filename, name string, typ int32) ([]byte, error) {
 
 var elfGoNote = []byte("Go\x00\x00")
 
-// readELFGoBuildID the Go build ID string from an ELF binary.
-// The Go build ID is stored in a note described by an ELF PT_NOTE prog header.
-// The caller has already opened filename, to get f, and read the first 4 kB out, in data.
+// The Go build ID is stored in a note described by an ELF PT_NOTE prog
+// header.  The caller has already opened filename, to get f, and read
+// at least 4 kB out, in data.
 func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) {
-	// Assume the note content is in the first 4 kB, already read.
+	// Assume the note content is in the data, already read.
 	// Rewrite the ELF header to set shnum to 0, so that we can pass
 	// the data to elf.NewFile and it will decode the Prog list but not
 	// try to read the section headers and the string table from disk.
@@ -95,11 +96,31 @@ func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string,
 		return "", &os.PathError{Path: filename, Op: "parse", Err: err}
 	}
 	for _, p := range ef.Progs {
-		if p.Type != elf.PT_NOTE || p.Off >= uint64(len(data)) || p.Off+p.Filesz >= uint64(len(data)) || p.Filesz < 16 {
+		if p.Type != elf.PT_NOTE || p.Filesz < 16 {
 			continue
 		}
 
-		note := data[p.Off : p.Off+p.Filesz]
+		var note []byte
+		if p.Off+p.Filesz < uint64(len(data)) {
+			note = data[p.Off : p.Off+p.Filesz]
+		} else {
+			// For some linkers, such as the Solaris linker,
+			// the buildid may not be found in data (which
+			// likely contains the first 16kB of the file)
+			// or even the first few megabytes of the file
+			// due to differences in note segment placement;
+			// in that case, extract the note data manually.
+			_, err = f.Seek(int64(p.Off), 0)
+			if err != nil {
+				return "", err
+			}
+
+			note = make([]byte, p.Filesz)
+			_, err = io.ReadFull(f, note)
+			if err != nil {
+				return "", err
+			}
+		}
 		nameSize := ef.ByteOrder.Uint32(note)
 		valSize := ef.ByteOrder.Uint32(note[4:])
 		tag := ef.ByteOrder.Uint32(note[8:])
@@ -114,3 +135,42 @@ func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string,
 	// No note. Treat as successful but build ID empty.
 	return "", nil
 }
+
+// The Go build ID is stored at the beginning of the Mach-O __text segment.
+// The caller has already opened filename, to get f, and read a few kB out, in data.
+// Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount
+// of other junk placed in the file ahead of the main text.
+func readMachoGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) {
+	// If the data we want has already been read, don't worry about Mach-O parsing.
+	// This is both an optimization and a hedge against the Mach-O parsing failing
+	// in the future due to, for example, the name of the __text section changing.
+	if b, err := readRawGoBuildID(filename, data); b != "" && err == nil {
+		return b, err
+	}
+
+	mf, err := macho.NewFile(f)
+	if err != nil {
+		return "", &os.PathError{Path: filename, Op: "parse", Err: err}
+	}
+
+	sect := mf.Section("__text")
+	if sect == nil {
+		// Every binary has a __text section. Something is wrong.
+		return "", &os.PathError{Path: filename, Op: "parse", Err: fmt.Errorf("cannot find __text section")}
+	}
+
+	// It should be in the first few bytes, but read a lot just in case,
+	// especially given our past problems on OS X with the build ID moving.
+	// There shouldn't be much difference between reading 4kB and 32kB:
+	// the hard part is getting to the data, not transferring it.
+	n := sect.Size
+	if n > uint64(BuildIDReadSize) {
+		n = uint64(BuildIDReadSize)
+	}
+	buf := make([]byte, n)
+	if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil {
+		return "", err
+	}
+
+	return readRawGoBuildID(filename, buf)
+}
diff --git a/src/cmd/go/note_test.go b/src/cmd/go/note_test.go
index 3d64451..31c83c7 100644
--- a/src/cmd/go/note_test.go
+++ b/src/cmd/go/note_test.go
@@ -11,6 +11,26 @@ import (
 )
 
 func TestNoteReading(t *testing.T) {
+	testNoteReading(t)
+}
+
+func TestNoteReading2K(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skipf("2kB is not enough on %s", runtime.GOOS)
+	}
+	// Set BuildIDReadSize to 2kB to exercise Mach-O parsing more strictly.
+	defer func(old int) {
+		main.BuildIDReadSize = old
+	}(main.BuildIDReadSize)
+	main.BuildIDReadSize = 2 * 1024
+
+	testNoteReading(t)
+}
+
+func testNoteReading(t *testing.T) {
+	if runtime.GOOS == "dragonfly" {
+		t.Skipf("TestNoteReading is broken on dragonfly - golang.org/issue/13364", runtime.GOOS)
+	}
 	tg := testgo(t)
 	defer tg.cleanup()
 	tg.tempFile("hello.go", `package main; func main() { print("hello, world\n") }`)
@@ -33,9 +53,6 @@ func TestNoteReading(t *testing.T) {
 		// no external linking
 		t.Logf("no external linking - skipping linkmode=external test")
 
-	case "solaris":
-		t.Logf("skipping - golang.org/issue/12178")
-
 	default:
 		tg.run("build", "-ldflags", "-buildid="+buildID+" -linkmode=external", "-o", tg.path("hello.exe"), tg.path("hello.go"))
 		id, err := main.ReadBuildIDFromBinary(tg.path("hello.exe"))
diff --git a/src/cmd/go/pkg.go b/src/cmd/go/pkg.go
index c481794..e1d1ed4 100644
--- a/src/cmd/go/pkg.go
+++ b/src/cmd/go/pkg.go
@@ -1781,8 +1781,17 @@ var (
 	goBuildEnd    = []byte("\"\n \xff")
 
 	elfPrefix = []byte("\x7fELF")
+
+	machoPrefixes = [][]byte{
+		{0xfe, 0xed, 0xfa, 0xce},
+		{0xfe, 0xed, 0xfa, 0xcf},
+		{0xce, 0xfa, 0xed, 0xfe},
+		{0xcf, 0xfa, 0xed, 0xfe},
+	}
 )
 
+var BuildIDReadSize = 32 * 1024 // changed for testing
+
 // ReadBuildIDFromBinary reads the build ID from a binary.
 //
 // ELF binaries store the build ID in a proper PT_NOTE section.
@@ -1797,10 +1806,11 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) {
 		return "", &os.PathError{Op: "parse", Path: filename, Err: errBuildIDUnknown}
 	}
 
-	// Read the first 16 kB of the binary file.
+	// Read the first 32 kB of the binary file.
 	// That should be enough to find the build ID.
 	// In ELF files, the build ID is in the leading headers,
-	// which are typically less than 4 kB, not to mention 16 kB.
+	// which are typically less than 4 kB, not to mention 32 kB.
+	// In Mach-O files, there's no limit, so we have to parse the file.
 	// On other systems, we're trying to read enough that
 	// we get the beginning of the text segment in the read.
 	// The offset where the text segment begins in a hello
@@ -1808,7 +1818,6 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) {
 	//
 	//	Plan 9: 0x20
 	//	Windows: 0x600
-	//	Mach-O: 0x2000
 	//
 	f, err := os.Open(filename)
 	if err != nil {
@@ -1816,7 +1825,7 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) {
 	}
 	defer f.Close()
 
-	data := make([]byte, 16*1024)
+	data := make([]byte, BuildIDReadSize)
 	_, err = io.ReadFull(f, data)
 	if err == io.ErrUnexpectedEOF {
 		err = nil
@@ -1828,7 +1837,17 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) {
 	if bytes.HasPrefix(data, elfPrefix) {
 		return readELFGoBuildID(filename, f, data)
 	}
+	for _, m := range machoPrefixes {
+		if bytes.HasPrefix(data, m) {
+			return readMachoGoBuildID(filename, f, data)
+		}
+	}
+
+	return readRawGoBuildID(filename, data)
+}
 
+// readRawGoBuildID finds the raw build ID stored in text segment data.
+func readRawGoBuildID(filename string, data []byte) (id string, err error) {
 	i := bytes.Index(data, goBuildPrefix)
 	if i < 0 {
 		// Missing. Treat as successful but build ID empty.
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 2955a00..f074d90 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -2173,7 +2173,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
 			r = int(p.To.Reg)
 		}
 		o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.To.Reg), uint32(v)&31)
-		if p.As == ASRAD && (v&0x20 != 0) {
+		if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
 			o1 |= 1 << 1 /* mb[5] */
 		}
 
diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go
index 74ec9dd..4eb2092 100644
--- a/src/cmd/link/internal/amd64/asm.go
+++ b/src/cmd/link/internal/amd64/asm.go
@@ -141,7 +141,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
 
 		return
 
-	case 256 + ld.R_X86_64_GOTPCREL:
+	case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
 		if targ.Type != obj.SDYNIMPORT {
 			// have symbol
 			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index 508f055..9dedb0e 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -348,7 +348,23 @@ const (
 	R_X86_64_DTPOFF32          = 21
 	R_X86_64_GOTTPOFF          = 22
 	R_X86_64_TPOFF32           = 23
-	R_X86_64_COUNT             = 24
+	R_X86_64_PC64              = 24
+	R_X86_64_GOTOFF64          = 25
+	R_X86_64_GOTPC32           = 26
+	R_X86_64_GOT64             = 27
+	R_X86_64_GOTPCREL64        = 28
+	R_X86_64_GOTPC64           = 29
+	R_X86_64_GOTPLT64          = 30
+	R_X86_64_PLTOFF64          = 31
+	R_X86_64_SIZE32            = 32
+	R_X86_64_SIZE64            = 33
+	R_X86_64_GOTPC32_TLSDEC    = 34
+	R_X86_64_TLSDESC_CALL      = 35
+	R_X86_64_TLSDESC           = 36
+	R_X86_64_IRELATIVE         = 37
+	R_X86_64_PC32_BND          = 40
+	R_X86_64_GOTPCRELX         = 41
+	R_X86_64_REX_GOTPCRELX     = 42
 	R_AARCH64_ABS64            = 257
 	R_AARCH64_ABS32            = 258
 	R_AARCH64_CALL26           = 283
@@ -382,7 +398,6 @@ const (
 	R_ALPHA_GLOB_DAT           = 25
 	R_ALPHA_JMP_SLOT           = 26
 	R_ALPHA_RELATIVE           = 27
-	R_ALPHA_COUNT              = 28
 	R_ARM_NONE                 = 0
 	R_ARM_PC24                 = 1
 	R_ARM_ABS32                = 2
@@ -422,7 +437,6 @@ const (
 	R_ARM_RABS32               = 253
 	R_ARM_RPC24                = 254
 	R_ARM_RBASE                = 255
-	R_ARM_COUNT                = 38
 	R_386_NONE                 = 0
 	R_386_32                   = 1
 	R_386_PC32                 = 2
@@ -454,7 +468,11 @@ const (
 	R_386_TLS_DTPMOD32         = 35
 	R_386_TLS_DTPOFF32         = 36
 	R_386_TLS_TPOFF32          = 37
-	R_386_COUNT                = 38
+	R_386_TLS_GOTDESC          = 39
+	R_386_TLS_DESC_CALL        = 40
+	R_386_TLS_DESC             = 41
+	R_386_IRELATIVE            = 42
+	R_386_GOT32X               = 43
 	R_PPC_NONE                 = 0
 	R_PPC_ADDR32               = 1
 	R_PPC_ADDR24               = 2
@@ -492,7 +510,6 @@ const (
 	R_PPC_SECTOFF_LO           = 34
 	R_PPC_SECTOFF_HI           = 35
 	R_PPC_SECTOFF_HA           = 36
-	R_PPC_COUNT                = 37
 	R_PPC_TLS                  = 67
 	R_PPC_DTPMOD32             = 68
 	R_PPC_TPREL16              = 69
@@ -533,7 +550,6 @@ const (
 	R_PPC_EMB_RELST_HA         = 114
 	R_PPC_EMB_BIT_FLD          = 115
 	R_PPC_EMB_RELSDA           = 116
-	R_PPC_EMB_COUNT            = R_PPC_EMB_RELSDA - R_PPC_EMB_NADDR32 + 1
 	R_PPC64_REL24              = R_PPC_REL24
 	R_PPC64_JMP_SLOT           = R_PPC_JMP_SLOT
 	R_PPC64_ADDR64             = 38
@@ -1723,10 +1739,6 @@ func doelf() {
 			Addstring(shstrtab, ".note.go.pkg-list")
 			Addstring(shstrtab, ".note.go.deps")
 		}
-
-		if buildid != "" {
-			Addstring(shstrtab, ".note.go.buildid")
-		}
 	}
 
 	hasinitarr := Linkshared
diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/ld/ldelf.go
index 3efdb75..7ff37ad 100644
--- a/src/cmd/link/internal/ld/ldelf.go
+++ b/src/cmd/link/internal/ld/ldelf.go
@@ -1001,12 +1001,15 @@ func reltype(pn string, elftype int, siz *uint8) int {
 		'6' | R_X86_64_PC32<<24,
 		'6' | R_X86_64_PLT32<<24,
 		'6' | R_X86_64_GOTPCREL<<24,
+		'6' | R_X86_64_GOTPCRELX<<24,
+		'6' | R_X86_64_REX_GOTPCRELX<<24,
 		'8' | R_386_32<<24,
 		'8' | R_386_PC32<<24,
 		'8' | R_386_GOT32<<24,
 		'8' | R_386_PLT32<<24,
 		'8' | R_386_GOTOFF<<24,
 		'8' | R_386_GOTPC<<24,
+		'8' | R_386_GOT32X<<24,
 		'9' | R_PPC64_REL24<<24:
 		*siz = 4
 
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 8ccbec9..a9ec3d2 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -886,8 +886,8 @@ func archive() {
 
 	mayberemoveoutfile()
 	argv := []string{"ar", "-q", "-c", "-s", outfile}
-	argv = append(argv, hostobjCopy()...)
 	argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+	argv = append(argv, hostobjCopy()...)
 
 	if Debug['v'] != 0 {
 		fmt.Fprintf(&Bso, "archive: %s\n", strings.Join(argv, " "))
@@ -1012,8 +1012,8 @@ func hostlink() {
 		argv = append(argv, "-Qunused-arguments")
 	}
 
-	argv = append(argv, hostobjCopy()...)
 	argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+	argv = append(argv, hostobjCopy()...)
 
 	if Linkshared {
 		seenDirs := make(map[string]bool)
diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go
index d30bd48..6495385 100644
--- a/src/cmd/link/internal/x86/asm.go
+++ b/src/cmd/link/internal/x86/asm.go
@@ -78,7 +78,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
 
 		return
 
-	case 256 + ld.R_386_GOT32:
+	case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X:
 		if targ.Type != obj.SDYNIMPORT {
 			// have symbol
 			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
index 6f65a55..eeec974 100644
--- a/src/mime/multipart/multipart.go
+++ b/src/mime/multipart/multipart.go
@@ -25,6 +25,11 @@ import (
 
 var emptyParams = make(map[string]string)
 
+// This constant needs to be at least 76 for this package to work correctly.
+// This is because \r\n--separator_of_len_70- would fill the buffer and it
+// wouldn't be safe to consume a single byte from it.
+const peekBufferSize = 4096
+
 // A Part represents a single part in a multipart body.
 type Part struct {
 	// The headers of the body, if any, with the keys canonicalized
@@ -91,7 +96,7 @@ func (p *Part) parseContentDisposition() {
 func NewReader(r io.Reader, boundary string) *Reader {
 	b := []byte("\r\n--" + boundary + "--")
 	return &Reader{
-		bufReader:        bufio.NewReader(r),
+		bufReader:        bufio.NewReaderSize(r, peekBufferSize),
 		nl:               b[:2],
 		nlDashBoundary:   b[:len(b)-2],
 		dashBoundaryDash: b[2:],
@@ -148,7 +153,7 @@ func (pr partReader) Read(d []byte) (n int, err error) {
 		// the read request.  No need to parse more at the moment.
 		return p.buffer.Read(d)
 	}
-	peek, err := p.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
+	peek, err := p.mr.bufReader.Peek(peekBufferSize) // TODO(bradfitz): add buffer size accessor
 
 	// Look for an immediate empty part without a leading \r\n
 	// before the boundary separator.  Some MIME code makes empty
@@ -229,6 +234,7 @@ func (r *Reader) NextPart() (*Part, error) {
 	expectNewPart := false
 	for {
 		line, err := r.bufReader.ReadSlice('\n')
+
 		if err == io.EOF && r.isFinalBoundary(line) {
 			// If the buffer ends in "--boundary--" without the
 			// trailing "\r\n", ReadSlice will return an error
@@ -343,13 +349,17 @@ func (mr *Reader) peekBufferIsEmptyPart(peek []byte) bool {
 // peekBufferSeparatorIndex returns the index of mr.nlDashBoundary in
 // peek and whether it is a real boundary (and not a prefix of an
 // unrelated separator). To be the end, the peek buffer must contain a
-// newline after the boundary.
+// newline after the boundary or contain the ending boundary (--separator--).
 func (mr *Reader) peekBufferSeparatorIndex(peek []byte) (idx int, isEnd bool) {
 	idx = bytes.Index(peek, mr.nlDashBoundary)
 	if idx == -1 {
 		return
 	}
+
 	peek = peek[idx+len(mr.nlDashBoundary):]
+	if len(peek) == 0 || len(peek) == 1 && peek[0] == '-' {
+		return idx, false
+	}
 	if len(peek) > 1 && peek[0] == '-' && peek[1] == '-' {
 		return idx, true
 	}
diff --git a/src/mime/multipart/multipart_test.go b/src/mime/multipart/multipart_test.go
index 30452d1..32cec57 100644
--- a/src/mime/multipart/multipart_test.go
+++ b/src/mime/multipart/multipart_test.go
@@ -616,6 +616,54 @@ html things
 			},
 		},
 	},
+	// Issue 12662: Check that we don't consume the leading \r if the peekBuffer
+	// ends in '\r\n--separator-'
+	{
+		name: "peek buffer boundary condition",
+		sep:  "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+		in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1),
+		want: []headerBody{
+			{textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+				strings.Repeat("A", peekBufferSize-65),
+			},
+		},
+	},
+	// Issue 12662: Same test as above with \r\n at the end
+	{
+		name: "peek buffer boundary condition",
+		sep:  "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+		in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--\n", "\n", "\r\n", -1),
+		want: []headerBody{
+			{textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+				strings.Repeat("A", peekBufferSize-65),
+			},
+		},
+	},
+	// Issue 12662v2: We want to make sure that for short buffers that end with
+	// '\r\n--separator-' we always consume at least one (valid) symbol from the
+	// peekBuffer
+	{
+		name: "peek buffer boundary condition",
+		sep:  "aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+		in: strings.Replace(`--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize)+"\n--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1),
+		want: []headerBody{
+			{textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+				strings.Repeat("A", peekBufferSize),
+			},
+		},
+	},
 
 	roundTripParseTest(),
 }
diff --git a/src/net/interface_windows.go b/src/net/interface_windows.go
index e25c1ed..8cb9d76 100644
--- a/src/net/interface_windows.go
+++ b/src/net/interface_windows.go
@@ -48,7 +48,7 @@ func getInterfaceInfos() ([]syscall.InterfaceInfo, error) {
 		return nil, os.NewSyscallError("wsaioctl", err)
 	}
 	iilen := ret / uint32(unsafe.Sizeof(iia[0]))
-	return iia[:iilen-1], nil
+	return iia[:iilen], nil
 }
 
 func bytesEqualIP(a []byte, b []int8) bool {
diff --git a/src/net/net_windows_test.go b/src/net/net_windows_test.go
index da03e10..4f6bd45 100644
--- a/src/net/net_windows_test.go
+++ b/src/net/net_windows_test.go
@@ -6,10 +6,13 @@ package net
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
 	"io"
 	"os"
 	"os/exec"
+	"sort"
+	"strings"
 	"syscall"
 	"testing"
 	"time"
@@ -163,3 +166,53 @@ func TestAcceptIgnoreSomeErrors(t *testing.T) {
 		t.Fatalf(`"%s" received from recv, but "abc" expected`, s)
 	}
 }
+
+func isWindowsXP(t *testing.T) bool {
+	v, err := syscall.GetVersion()
+	if err != nil {
+		t.Fatalf("GetVersion failed: %v", err)
+	}
+	major := byte(v)
+	return major < 6
+}
+
+func listInterfacesWithNetsh() ([]string, error) {
+	out, err := exec.Command("netsh", "interface", "ip", "show", "config").CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("netsh failed: %v: %q", err, string(out))
+	}
+	lines := bytes.Split(out, []byte{'\r', '\n'})
+	names := make([]string, 0)
+	for _, line := range lines {
+		f := bytes.Split(line, []byte{'"'})
+		if len(f) == 3 {
+			names = append(names, string(f[1]))
+		}
+	}
+	return names, nil
+}
+
+func TestInterfaceList(t *testing.T) {
+	if isWindowsXP(t) {
+		t.Skip("Windows XP netsh command does not provide required functionality")
+	}
+	ift, err := Interfaces()
+	if err != nil {
+		t.Fatal(err)
+	}
+	have := make([]string, 0)
+	for _, ifi := range ift {
+		have = append(have, ifi.Name)
+	}
+	sort.Strings(have)
+
+	want, err := listInterfacesWithNetsh()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sort.Strings(want)
+
+	if strings.Join(want, "/") != strings.Join(have, "/") {
+		t.Fatalf("unexpected interface list %q, want %q", have, want)
+	}
+}
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 3b4ca4d..980b1ca 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -661,6 +661,8 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
 	// come in on the m->g0 stack already.
 	get_tls(CX)
 	MOVQ	g(CX), R8
+	CMPQ	R8, $0
+	JEQ	nosave
 	MOVQ	g_m(R8), R8
 	MOVQ	m_g0(R8), SI
 	MOVQ	g(CX), DI
@@ -670,11 +672,11 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
 	CMPQ	SI, DI
 	JEQ	nosave
 	
+	// Switch to system stack.
 	MOVQ	m_g0(R8), SI
 	CALL	gosave<>(SB)
 	MOVQ	SI, g(CX)
 	MOVQ	(g_sched+gobuf_sp)(SI), SP
-nosave:
 
 	// Now on a scheduling stack (a pthread-created stack).
 	// Make sure we have enough room for 4 stack-backed fast-call
@@ -700,6 +702,29 @@ nosave:
 	MOVL	AX, ret+16(FP)
 	RET
 
+nosave:
+	// Running on a system stack, perhaps even without a g.
+	// Having no g can happen during thread creation or thread teardown
+	// (see needm/dropm on Solaris, for example).
+	// This code is like the above sequence but without saving/restoring g
+	// and without worrying about the stack moving out from under us
+	// (because we're on a system stack, not a goroutine stack).
+	// The above code could be used directly if already on a system stack,
+	// but then the only path through this code would be a rare case on Solaris.
+	// Using this code for all "already on system stack" calls exercises it more,
+	// which should help keep it correct.
+	SUBQ	$64, SP
+	ANDQ	$~15, SP
+	MOVQ	$0, 48(SP)		// where above code stores g, in case someone looks during debugging
+	MOVQ	DX, 40(SP)	// save original stack pointer
+	MOVQ	BX, DI		// DI = first argument in AMD64 ABI
+	MOVQ	BX, CX		// CX = first argument in Win64
+	CALL	AX
+	MOVQ	40(SP), SI	// restore original stack pointer
+	MOVQ	SI, SP
+	MOVL	AX, ret+16(FP)
+	RET
+
 // cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
 // Turn the fn into a Go func (by taking its address) and call
 // cgocallback_gofunc.
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index 6e97256..4b12f01 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -627,15 +627,18 @@ TEXT runtime·memclr(SB),NOSPLIT,$0-8
 	MOVL	ptr+0(FP), DI
 	MOVL	n+4(FP), CX
 	MOVQ	CX, BX
-	ANDQ	$7, BX
-	SHRQ	$3, CX
+	ANDQ	$3, BX
+	SHRQ	$2, CX
 	MOVQ	$0, AX
 	CLD
 	REP
-	STOSQ
+	STOSL
 	MOVQ	BX, CX
 	REP
 	STOSB
+	// Note: we zero only 4 bytes at a time so that the tail is at most
+	// 3 bytes.  That guarantees that we aren't zeroing pointers with STOSB.
+	// See issue 13160.
 	RET
 
 TEXT runtime·getcallerpc(SB),NOSPLIT,$8-12
diff --git a/src/runtime/cgo/cgo.go b/src/runtime/cgo/cgo.go
index cb24678..8f3e66f 100644
--- a/src/runtime/cgo/cgo.go
+++ b/src/runtime/cgo/cgo.go
@@ -20,7 +20,9 @@ package cgo
 #cgo !android,linux LDFLAGS: -lpthread
 #cgo netbsd LDFLAGS: -lpthread
 #cgo openbsd LDFLAGS: -lpthread
-#cgo windows LDFLAGS: -lm -mthreads
+// we must explicitly link msvcrt, because runtime needs ntdll, and ntdll
+// exports some incompatible libc functions. See golang.org/issue/12030.
+#cgo windows LDFLAGS: -lmsvcrt -lm -mthreads
 
 #cgo CFLAGS: -Wall -Werror
 
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 353f840..b966eed 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -401,7 +401,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 			if p == h.arena_end {
 				h.arena_end = new_end
 				h.arena_reserved = reserved
-			} else if p+p_size <= h.arena_start+_MaxArena32 {
+			} else if h.arena_start <= p && p+p_size <= h.arena_start+_MaxArena32 {
 				// Keep everything page-aligned.
 				// Our pages are bigger than hardware pages.
 				h.arena_end = p + p_size
@@ -411,7 +411,10 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 				h.arena_used = used
 				h.arena_reserved = reserved
 			} else {
-				var stat uint64
+				// We haven't added this allocation to
+				// the stats, so subtract it from a
+				// fake stat (but avoid underflow).
+				stat := uint64(p_size)
 				sysFree((unsafe.Pointer)(p), p_size, &stat)
 			}
 		}
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index f988e75..e8c8999 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -69,29 +69,89 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 }
 
 func sysUnused(v unsafe.Pointer, n uintptr) {
-	var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
-	if s != 0 && (uintptr(v)%s != 0 || n%s != 0) {
-		// See issue 8832
-		// Linux kernel bug: https://bugzilla.kernel.org/show_bug.cgi?id=93111
-		// Mark the region as NOHUGEPAGE so the kernel's khugepaged
-		// doesn't undo our DONTNEED request.  khugepaged likes to migrate
-		// regions which are only partially mapped to huge pages, including
-		// regions with some DONTNEED marks.  That needlessly allocates physical
-		// memory for our DONTNEED regions.
-		madvise(v, n, _MADV_NOHUGEPAGE)
+	// By default, Linux's "transparent huge page" support will
+	// merge pages into a huge page if there's even a single
+	// present regular page, undoing the effects of the DONTNEED
+	// below. On amd64, that means khugepaged can turn a single
+	// 4KB page to 2MB, bloating the process's RSS by as much as
+	// 512X. (See issue #8832 and Linux kernel bug
+	// https://bugzilla.kernel.org/show_bug.cgi?id=93111)
+	//
+	// To work around this, we explicitly disable transparent huge
+	// pages when we release pages of the heap. However, we have
+	// to do this carefully because changing this flag tends to
+	// split the VMA (memory mapping) containing v in to three
+	// VMAs in order to track the different values of the
+	// MADV_NOHUGEPAGE flag in the different regions. There's a
+	// default limit of 65530 VMAs per address space (sysctl
+	// vm.max_map_count), so we must be careful not to create too
+	// many VMAs (see issue #12233).
+	//
+	// Since huge pages are huge, there's little use in adjusting
+	// the MADV_NOHUGEPAGE flag on a fine granularity, so we avoid
+	// exploding the number of VMAs by only adjusting the
+	// MADV_NOHUGEPAGE flag on a large granularity. This still
+	// gets most of the benefit of huge pages while keeping the
+	// number of VMAs under control. With hugePageSize = 2MB, even
+	// a pessimal heap can reach 128GB before running out of VMAs.
+	if hugePageSize != 0 {
+		var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
+
+		// If it's a large allocation, we want to leave huge
+		// pages enabled. Hence, we only adjust the huge page
+		// flag on the huge pages containing v and v+n-1, and
+		// only if those aren't aligned.
+		var head, tail uintptr
+		if uintptr(v)%s != 0 {
+			// Compute huge page containing v.
+			head = uintptr(v) &^ (s - 1)
+		}
+		if (uintptr(v)+n)%s != 0 {
+			// Compute huge page containing v+n-1.
+			tail = (uintptr(v) + n - 1) &^ (s - 1)
+		}
+
+		// Note that madvise will return EINVAL if the flag is
+		// already set, which is quite likely. We ignore
+		// errors.
+		if head != 0 && head+hugePageSize == tail {
+			// head and tail are different but adjacent,
+			// so do this in one call.
+			madvise(unsafe.Pointer(head), 2*hugePageSize, _MADV_NOHUGEPAGE)
+		} else {
+			// Advise the huge pages containing v and v+n-1.
+			if head != 0 {
+				madvise(unsafe.Pointer(head), hugePageSize, _MADV_NOHUGEPAGE)
+			}
+			if tail != 0 && tail != head {
+				madvise(unsafe.Pointer(tail), hugePageSize, _MADV_NOHUGEPAGE)
+			}
+		}
 	}
+
 	madvise(v, n, _MADV_DONTNEED)
 }
 
 func sysUsed(v unsafe.Pointer, n uintptr) {
 	if hugePageSize != 0 {
-		// Undo the NOHUGEPAGE marks from sysUnused.  There is no alignment check
-		// around this call as spans may have been merged in the interim.
-		// Note that this might enable huge pages for regions which were
-		// previously disabled.  Unfortunately there is no easy way to detect
-		// what the previous state was, and in any case we probably want huge
-		// pages to back our heap if the kernel can arrange that.
-		madvise(v, n, _MADV_HUGEPAGE)
+		// Partially undo the NOHUGEPAGE marks from sysUnused
+		// for whole huge pages between v and v+n. This may
+		// leave huge pages off at the end points v and v+n
+		// even though allocations may cover these entire huge
+		// pages. We could detect this and undo NOHUGEPAGE on
+		// the end points as well, but it's probably not worth
+		// the cost because when neighboring allocations are
+		// freed sysUnused will just set NOHUGEPAGE again.
+		var s uintptr = hugePageSize
+
+		// Round v up to a huge page boundary.
+		beg := (uintptr(v) + (s - 1)) &^ (s - 1)
+		// Round v+n down to a huge page boundary.
+		end := (uintptr(v) + n) &^ (s - 1)
+
+		if beg < end {
+			madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
+		}
 	}
 }
 
diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s
index 3f20b69..ce962f3 100644
--- a/src/runtime/memclr_386.s
+++ b/src/runtime/memclr_386.s
@@ -21,7 +21,8 @@ tail:
 	CMPL	BX, $2
 	JBE	_1or2
 	CMPL	BX, $4
-	JBE	_3or4
+	JB	_3
+	JE	_4
 	CMPL	BX, $8
 	JBE	_5through8
 	CMPL	BX, $16
@@ -68,9 +69,13 @@ _1or2:
 	RET
 _0:
 	RET
-_3or4:
+_3:
 	MOVW	AX, (DI)
-	MOVW	AX, -2(DI)(BX*1)
+	MOVB	AX, 2(DI)
+	RET
+_4:
+	// We need a separate case for 4 to make sure we clear pointers atomically.
+	MOVL	AX, (DI)
 	RET
 _5through8:
 	MOVL	AX, (DI)
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index ec24f1d..3e2c4b2 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -23,7 +23,8 @@ tail:
 	CMPQ	BX, $4
 	JBE	_3or4
 	CMPQ	BX, $8
-	JBE	_5through8
+	JB	_5through7
+	JE	_8
 	CMPQ	BX, $16
 	JBE	_9through16
 	PXOR	X0, X0
@@ -71,10 +72,14 @@ _3or4:
 	MOVW	AX, (DI)
 	MOVW	AX, -2(DI)(BX*1)
 	RET
-_5through8:
+_5through7:
 	MOVL	AX, (DI)
 	MOVL	AX, -4(DI)(BX*1)
 	RET
+_8:
+	// We need a separate case for 8 to make sure we clear pointers atomically.
+	MOVQ	AX, (DI)
+	RET
 _9through16:
 	MOVQ	AX, (DI)
 	MOVQ	AX, -8(DI)(BX*1)
diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s
index c44c123..47c6b73 100644
--- a/src/runtime/memclr_arm64.s
+++ b/src/runtime/memclr_arm64.s
@@ -8,11 +8,30 @@
 TEXT runtime·memclr(SB),NOSPLIT,$0-16
 	MOVD	ptr+0(FP), R3
 	MOVD	n+8(FP), R4
-	CMP	$0, R4
-	BEQ	done
-	ADD	R3, R4, R4
+	// TODO(mwhudson): this is written this way to avoid tickling
+	// warnings from addpool when written as AND $7, R4, R6 (see
+	// https://golang.org/issue/12708)
+	AND	$~7, R4, R5	// R5 is N&~7
+	SUB	R5, R4, R6	// R6 is N&7
+
+	CMP	$0, R5
+	BEQ	nowords
+
+	ADD	R3, R5, R5
+
+wordloop: // TODO: Optimize for unaligned ptr.
+	MOVD.P	$0, 8(R3)
+	CMP	R3, R5
+	BNE	wordloop
+nowords:
+        CMP	$0, R6
+        BEQ	done
+
+	ADD	R3, R6, R6
+
+byteloop:
 	MOVBU.P	$0, 1(R3)
-	CMP	R3, R4
-	BNE	-2(PC)
+	CMP	R3, R6
+	BNE	byteloop
 done:
 	RET
diff --git a/src/runtime/memclr_plan9_386.s b/src/runtime/memclr_plan9_386.s
index 50f327b..4707ab2 100644
--- a/src/runtime/memclr_plan9_386.s
+++ b/src/runtime/memclr_plan9_386.s
@@ -16,7 +16,8 @@ tail:
 	CMPL	BX, $2
 	JBE	_1or2
 	CMPL	BX, $4
-	JBE	_3or4
+	JB	_3
+	JE	_4
 	CMPL	BX, $8
 	JBE	_5through8
 	CMPL	BX, $16
@@ -35,9 +36,13 @@ _1or2:
 	RET
 _0:
 	RET
-_3or4:
+_3:
 	MOVW	AX, (DI)
-	MOVW	AX, -2(DI)(BX*1)
+	MOVB	AX, 2(DI)
+	RET
+_4:
+	// We need a separate case for 4 to make sure we clear pointers atomically.
+	MOVL	AX, (DI)
 	RET
 _5through8:
 	MOVL	AX, (DI)
diff --git a/src/runtime/memclr_ppc64x.s b/src/runtime/memclr_ppc64x.s
index cea42cb..90e2748 100644
--- a/src/runtime/memclr_ppc64x.s
+++ b/src/runtime/memclr_ppc64x.s
@@ -10,11 +10,22 @@
 TEXT runtime·memclr(SB),NOSPLIT,$0-16
 	MOVD	ptr+0(FP), R3
 	MOVD	n+8(FP), R4
-	CMP	R4, $0
+	SRADCC	$3, R4, R6	// R6 is the number of words to zero
+	BEQ	bytes
+
+	SUB	$8, R3
+	MOVD	R6, CTR
+	MOVDU	R0, 8(R3)
+	BC	25, 0, -1(PC)	// bdnz+ $-4
+	ADD	$8, R3
+
+bytes:
+	ANDCC	$7, R4, R7	// R7 is the number of bytes to zero
 	BEQ	done
 	SUB	$1, R3
-	MOVD	R4, CTR
+	MOVD	R7, CTR
 	MOVBU	R0, 1(R3)
-	BC	25, 0, -1(PC) // bdnz+ $-4
+	BC	25, 0, -1(PC)	// bdnz+ $-4
+
 done:
 	RET
diff --git a/src/runtime/memmove_386.s b/src/runtime/memmove_386.s
index 4c0c74c..f72a73a 100644
--- a/src/runtime/memmove_386.s
+++ b/src/runtime/memmove_386.s
@@ -43,7 +43,8 @@ tail:
 	CMPL	BX, $2
 	JBE	move_1or2
 	CMPL	BX, $4
-	JBE	move_3or4
+	JB	move_3
+	JE	move_4
 	CMPL	BX, $8
 	JBE	move_5through8
 	CMPL	BX, $16
@@ -118,11 +119,16 @@ move_1or2:
 	RET
 move_0:
 	RET
-move_3or4:
+move_3:
 	MOVW	(SI), AX
-	MOVW	-2(SI)(BX*1), CX
+	MOVB	2(SI), CX
 	MOVW	AX, (DI)
-	MOVW	CX, -2(DI)(BX*1)
+	MOVB	CX, 2(DI)
+	RET
+move_4:
+	// We need a separate case for 4 to make sure we write pointers atomically.
+	MOVL	(SI), AX
+	MOVL	AX, (DI)
 	RET
 move_5through8:
 	MOVL	(SI), AX
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index f968435..e14614d 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -50,7 +50,8 @@ tail:
 	CMPQ	BX, $4
 	JBE	move_3or4
 	CMPQ	BX, $8
-	JBE	move_5through8
+	JB	move_5through7
+	JE	move_8
 	CMPQ	BX, $16
 	JBE	move_9through16
 	CMPQ	BX, $32
@@ -131,12 +132,17 @@ move_3or4:
 	MOVW	AX, (DI)
 	MOVW	CX, -2(DI)(BX*1)
 	RET
-move_5through8:
+move_5through7:
 	MOVL	(SI), AX
 	MOVL	-4(SI)(BX*1), CX
 	MOVL	AX, (DI)
 	MOVL	CX, -4(DI)(BX*1)
 	RET
+move_8:
+	// We need a separate case for 8 to make sure we write pointers atomically.
+	MOVQ	(SI), AX
+	MOVQ	AX, (DI)
+	RET
 move_9through16:
 	MOVQ	(SI), AX
 	MOVQ	-8(SI)(BX*1), CX
diff --git a/src/runtime/memmove_arm64.s b/src/runtime/memmove_arm64.s
index 66059a7..00813d4 100644
--- a/src/runtime/memmove_arm64.s
+++ b/src/runtime/memmove_arm64.s
@@ -14,23 +14,78 @@ TEXT runtime·memmove(SB), NOSPLIT, $-8-24
 	RET
 
 check:
+	AND	$~7, R5, R7	// R7 is N&~7
+	// TODO(mwhudson): this is written this way to avoid tickling
+	// warnings from addpool when written as AND $7, R5, R6 (see
+	// https://golang.org/issue/12708)
+	SUB	R7, R5, R6	// R6 is N&7
+
 	CMP	R3, R4
 	BLT	backward
 
-	ADD	R3, R5
-loop:
-	MOVBU.P	1(R4), R6
-	MOVBU.P	R6, 1(R3)
-	CMP	R3, R5
-	BNE	loop
+	// Copying forward proceeds by copying R7/8 words then copying R6 bytes.
+	// R3 and R4 are advanced as we copy.
+
+        // (There may be implementations of armv8 where copying by bytes until
+        // at least one of source or dest is word aligned is a worthwhile
+        // optimization, but on the one tested so far (xgene) it did not
+        // make a significant difference.)
+
+	CMP	$0, R7		// Do we need to do any word-by-word copying?
+	BEQ	noforwardlarge
+
+	ADD	R3, R7, R9	// R9 points just past where we copy by word
+
+forwardlargeloop:
+	MOVD.P	8(R4), R8	// R8 is just a scratch register
+	MOVD.P	R8, 8(R3)
+	CMP	R3, R9
+	BNE	forwardlargeloop
+
+noforwardlarge:
+	CMP	$0, R6		// Do we need to do any byte-by-byte copying?
+	BNE	forwardtail
+	RET
+
+forwardtail:
+	ADD	R3, R6, R9	// R9 points just past the destination memory
+
+forwardtailloop:
+	MOVBU.P 1(R4), R8
+	MOVBU.P	R8, 1(R3)
+	CMP	R3, R9
+	BNE	forwardtailloop
 	RET
 
 backward:
-	ADD	R5, R4
-	ADD	R3, R5
-loop1:
-	MOVBU.W	-1(R4), R6
-	MOVBU.W	R6, -1(R5)
-	CMP	R3, R5
-	BNE	loop1
+	// Copying backwards proceeds by copying R6 bytes then copying R7/8 words.
+	// R3 and R4 are advanced to the end of the destination/source buffers
+	// respectively and moved back as we copy.
+
+	ADD	R4, R5, R4	// R4 points just past the last source byte
+	ADD	R3, R5, R3	// R3 points just past the last destination byte
+
+	CMP	$0, R6		// Do we need to do any byte-by-byte copying?
+	BEQ	nobackwardtail
+
+	SUB	R6, R3, R9	// R9 points at the lowest destination byte that should be copied by byte.
+backwardtailloop:
+	MOVBU.W	-1(R4), R8
+	MOVBU.W	R8, -1(R3)
+	CMP	R9, R3
+	BNE	backwardtailloop
+
+nobackwardtail:
+	CMP     $0, R7		// Do we need to do any word-by-word copying?
+	BNE	backwardlarge
+	RET
+
+backwardlarge:
+        SUB	R7, R3, R9      // R9 points at the lowest destination byte
+
+backwardlargeloop:
+	MOVD.W	-8(R4), R8
+	MOVD.W	R8, -8(R3)
+	CMP	R9, R3
+	BNE	backwardlargeloop
 	RET
diff --git a/src/runtime/memmove_nacl_amd64p32.s b/src/runtime/memmove_nacl_amd64p32.s
index 373607a..dd7ac76 100644
--- a/src/runtime/memmove_nacl_amd64p32.s
+++ b/src/runtime/memmove_nacl_amd64p32.s
@@ -4,6 +4,9 @@
 
 #include "textflag.h"
 
+// This could use MOVSQ, but we use MOVSL so that if an object ends in
+// a 4 byte pointer, we copy it as a unit instead of byte by byte.
+
 TEXT runtime·memmove(SB), NOSPLIT, $0-12
 	MOVL	to+0(FP), DI
 	MOVL	from+4(FP), SI
@@ -14,9 +17,9 @@ TEXT runtime·memmove(SB), NOSPLIT, $0-12
 
 forward:
 	MOVL	BX, CX
-	SHRL	$3, CX
-	ANDL	$7, BX
-	REP; MOVSQ
+	SHRL	$2, CX
+	ANDL	$3, BX
+	REP; MOVSL
 	MOVL	BX, CX
 	REP; MOVSB
 	RET
@@ -32,15 +35,18 @@ back:
 	STD
 	
 	MOVL	BX, CX
-	SHRL	$3, CX
-	ANDL	$7, BX
-	SUBL	$8, DI
-	SUBL	$8, SI
-	REP; MOVSQ
-	ADDL	$7, DI
-	ADDL	$7, SI
+	SHRL	$2, CX
+	ANDL	$3, BX
+	SUBL	$4, DI
+	SUBL	$4, SI
+	REP; MOVSL
+	ADDL	$3, DI
+	ADDL	$3, SI
 	MOVL	BX, CX
 	REP; MOVSB
 	CLD
 
+	// Note: we copy only 4 bytes at a time so that the tail is at most
+	// 3 bytes.  That guarantees that we aren't copying pointers with MOVSB.
+	// See issue 13160.
 	RET
diff --git a/src/runtime/memmove_plan9_386.s b/src/runtime/memmove_plan9_386.s
index 025d4ce..3b492eb 100644
--- a/src/runtime/memmove_plan9_386.s
+++ b/src/runtime/memmove_plan9_386.s
@@ -39,7 +39,8 @@ tail:
 	CMPL	BX, $2
 	JBE	move_1or2
 	CMPL	BX, $4
-	JBE	move_3or4
+	JB	move_3
+	JE	move_4
 	CMPL	BX, $8
 	JBE	move_5through8
 	CMPL	BX, $16
@@ -104,11 +105,16 @@ move_1or2:
 	RET
 move_0:
 	RET
-move_3or4:
+move_3:
 	MOVW	(SI), AX
-	MOVW	-2(SI)(BX*1), CX
+	MOVB	2(SI), CX
 	MOVW	AX, (DI)
-	MOVW	CX, -2(DI)(BX*1)
+	MOVB	CX, 2(DI)
+	RET
+move_4:
+	// We need a separate case for 4 to make sure we write pointers atomically.
+	MOVL	(SI), AX
+	MOVL	AX, (DI)
 	RET
 move_5through8:
 	MOVL	(SI), AX
diff --git a/src/runtime/memmove_plan9_amd64.s b/src/runtime/memmove_plan9_amd64.s
index 8e96b87..a1cc255 100644
--- a/src/runtime/memmove_plan9_amd64.s
+++ b/src/runtime/memmove_plan9_amd64.s
@@ -43,7 +43,8 @@ tail:
 	CMPQ	BX, $4
 	JBE	move_3or4
 	CMPQ	BX, $8
-	JBE	move_5through8
+	JB	move_5through7
+	JE	move_8
 	CMPQ	BX, $16
 	JBE	move_9through16
 
@@ -113,12 +114,17 @@ move_3or4:
 	MOVW	AX, (DI)
 	MOVW	CX, -2(DI)(BX*1)
 	RET
-move_5through8:
+move_5through7:
 	MOVL	(SI), AX
 	MOVL	-4(SI)(BX*1), CX
 	MOVL	AX, (DI)
 	MOVL	CX, -4(DI)(BX*1)
 	RET
+move_8:
+	// We need a separate case for 8 to make sure we write pointers atomically.
+	MOVQ	(SI), AX
+	MOVQ	AX, (DI)
+	RET
 move_9through16:
 	MOVQ	(SI), AX
 	MOVQ	-8(SI)(BX*1), CX
diff --git a/src/runtime/memmove_ppc64x.s b/src/runtime/memmove_ppc64x.s
index 3ada63e..72c90de 100644
--- a/src/runtime/memmove_ppc64x.s
+++ b/src/runtime/memmove_ppc64x.s
@@ -16,25 +16,73 @@ TEXT runtime·memmove(SB), NOSPLIT, $-8-24
 	RET
 
 check:
-	CMP	R3, R4
-	BGT	backward
+	ANDCC	$7, R5, R7	// R7 is the number of bytes to copy and CR0[EQ] is set if there are none.
+	SRAD	$3, R5, R6	// R6 is the number of words to copy
+	CMP	R6, $0, CR1	// CR1[EQ] is set if there are no words to copy.
 
+	CMP	R3, R4, CR2
+	BC	12, 9, backward	// I think you should be able to write this as "BGT CR2, backward"
+
+	// Copying forward proceeds by copying R6 words then copying R7 bytes.
+	// R3 and R4 are advanced as we copy. Because PPC64 lacks post-increment
+	// load/store, R3 and R4 point before the bytes that are to be copied.
+
+	BC	12, 6, noforwardlarge	// "BEQ CR1, noforwardlarge"
+
+	MOVD	R6, CTR
+
+	SUB	$8, R3
+	SUB	$8, R4
+
+forwardlargeloop:
+	MOVDU	8(R4), R8
+	MOVDU	R8, 8(R3)
+	BC	16, 0, forwardlargeloop // "BDNZ"
+
+	ADD	$8, R3
+	ADD	$8, R4
+
+noforwardlarge:
+	BNE	forwardtail	// Tests the bit set by ANDCC above
+	RET
+
+forwardtail:
 	SUB	$1, R3
-	ADD	R3, R5
 	SUB	$1, R4
-loop:
-	MOVBU	1(R4), R6
-	MOVBU	R6, 1(R3)
-	CMP	R3, R5
-	BNE	loop
+	MOVD	R7, CTR
+
+forwardtailloop:
+	MOVBZU	1(R4), R8
+	MOVBZU	R8, 1(R3)
+	BC	16, 0, forwardtailloop
 	RET
 
 backward:
-	ADD	R5, R4
-	ADD	R3, R5
-loop1:
-	MOVBU	-1(R4), R6
-	MOVBU	R6, -1(R5)
-	CMP	R3, R5
-	BNE	loop1
+	// Copying backwards proceeds by copying R7 bytes then copying R6 words.
+	// R3 and R4 are advanced to the end of the destination/source buffers
+	// respectively and moved back as we copy.
+
+	ADD	R5, R4, R4
+	ADD	R3, R5, R3
+
+	BEQ	nobackwardtail
+
+	MOVD	R7, CTR
+
+backwardtailloop:
+	MOVBZU	-1(R4), R8
+	MOVBZU	R8, -1(R3)
+	BC	16, 0, backwardtailloop
+
+nobackwardtail:
+	BC	4, 6, backwardlarge		// "BNE CR1"
+	RET
+
+backwardlarge:
+	MOVD	R6, CTR
+
+backwardlargeloop:
+	MOVDU	-8(R4), R8
+	MOVDU	R8, -8(R3)
+	BC	16, 0, backwardlargeloop	// "BDNZ"
 	RET
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index 857f99b..d5a2ad8 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -116,6 +116,41 @@ func BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }
 func BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }
 func BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }
 
+func bmMemmoveUnaligned(b *testing.B, n int) {
+	x := make([]byte, n+1)
+	y := make([]byte, n)
+	b.SetBytes(int64(n))
+	for i := 0; i < b.N; i++ {
+		copy(x[1:], y)
+	}
+}
+
+func BenchmarkMemmoveUnaligned0(b *testing.B)    { bmMemmoveUnaligned(b, 0) }
+func BenchmarkMemmoveUnaligned1(b *testing.B)    { bmMemmoveUnaligned(b, 1) }
+func BenchmarkMemmoveUnaligned2(b *testing.B)    { bmMemmoveUnaligned(b, 2) }
+func BenchmarkMemmoveUnaligned3(b *testing.B)    { bmMemmoveUnaligned(b, 3) }
+func BenchmarkMemmoveUnaligned4(b *testing.B)    { bmMemmoveUnaligned(b, 4) }
+func BenchmarkMemmoveUnaligned5(b *testing.B)    { bmMemmoveUnaligned(b, 5) }
+func BenchmarkMemmoveUnaligned6(b *testing.B)    { bmMemmoveUnaligned(b, 6) }
+func BenchmarkMemmoveUnaligned7(b *testing.B)    { bmMemmoveUnaligned(b, 7) }
+func BenchmarkMemmoveUnaligned8(b *testing.B)    { bmMemmoveUnaligned(b, 8) }
+func BenchmarkMemmoveUnaligned9(b *testing.B)    { bmMemmoveUnaligned(b, 9) }
+func BenchmarkMemmoveUnaligned10(b *testing.B)   { bmMemmoveUnaligned(b, 10) }
+func BenchmarkMemmoveUnaligned11(b *testing.B)   { bmMemmoveUnaligned(b, 11) }
+func BenchmarkMemmoveUnaligned12(b *testing.B)   { bmMemmoveUnaligned(b, 12) }
+func BenchmarkMemmoveUnaligned13(b *testing.B)   { bmMemmoveUnaligned(b, 13) }
+func BenchmarkMemmoveUnaligned14(b *testing.B)   { bmMemmoveUnaligned(b, 14) }
+func BenchmarkMemmoveUnaligned15(b *testing.B)   { bmMemmoveUnaligned(b, 15) }
+func BenchmarkMemmoveUnaligned16(b *testing.B)   { bmMemmoveUnaligned(b, 16) }
+func BenchmarkMemmoveUnaligned32(b *testing.B)   { bmMemmoveUnaligned(b, 32) }
+func BenchmarkMemmoveUnaligned64(b *testing.B)   { bmMemmoveUnaligned(b, 64) }
+func BenchmarkMemmoveUnaligned128(b *testing.B)  { bmMemmoveUnaligned(b, 128) }
+func BenchmarkMemmoveUnaligned256(b *testing.B)  { bmMemmoveUnaligned(b, 256) }
+func BenchmarkMemmoveUnaligned512(b *testing.B)  { bmMemmoveUnaligned(b, 512) }
+func BenchmarkMemmoveUnaligned1024(b *testing.B) { bmMemmoveUnaligned(b, 1024) }
+func BenchmarkMemmoveUnaligned2048(b *testing.B) { bmMemmoveUnaligned(b, 2048) }
+func BenchmarkMemmoveUnaligned4096(b *testing.B) { bmMemmoveUnaligned(b, 4096) }
+
 func TestMemclr(t *testing.T) {
 	size := 512
 	if testing.Short() {
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 44f9512..151a8bd 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -152,6 +152,11 @@ func gcAssistAlloc(size uintptr, allowAssist bool) {
 	}
 
 	// Record allocation.
+	if gp.gcalloc+size < gp.gcalloc {
+		// gcalloc would overflow, or it's set to a sentinel
+		// value to prevent recursive assist.
+		return
+	}
 	gp.gcalloc += size
 
 	if !allowAssist {
@@ -292,7 +297,12 @@ retry:
 		// more, so go around again after performing an
 		// interruptible sleep for 100 us (the same as the
 		// getfull barrier) to let other mutators run.
+
+		// timeSleep may allocate, so avoid recursive assist.
+		gcalloc := gp.gcalloc
+		gp.gcalloc = ^uintptr(0)
 		timeSleep(100 * 1000)
+		gp.gcalloc = gcalloc
 		goto retry
 	}
 }
@@ -355,6 +365,8 @@ func scanstack(gp *g) {
 			throw("g already has stack barriers")
 		}
 
+		gcLockStackBarriers(gp)
+
 	case _GCmarktermination:
 		if int(gp.stkbarPos) == len(gp.stkbar) {
 			// gp hit all of the stack barriers (or there
@@ -409,6 +421,9 @@ func scanstack(gp *g) {
 	if gcphase == _GCmarktermination {
 		gcw.dispose()
 	}
+	if gcphase == _GCscan {
+		gcUnlockStackBarriers(gp)
+	}
 	gp.gcscanvalid = true
 }
 
@@ -562,6 +577,8 @@ func gcRemoveStackBarriers(gp *g) {
 		print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
 	}
 
+	gcLockStackBarriers(gp)
+
 	// Remove stack barriers that we didn't hit.
 	for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
 		gcRemoveStackBarrier(gp, stkbar)
@@ -571,6 +588,8 @@ func gcRemoveStackBarriers(gp *g) {
 	// adjust them.
 	gp.stkbarPos = 0
 	gp.stkbar = gp.stkbar[:0]
+
+	gcUnlockStackBarriers(gp)
 }
 
 // gcRemoveStackBarrier removes a single stack barrier. It is the
@@ -589,22 +608,36 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
 		printlock()
 		print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
 		print("gp.stkbar=")
-		gcPrintStkbars(gp.stkbar)
-		print(", gp.stkbarPos=", gp.stkbarPos, ", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
+		gcPrintStkbars(gp, -1)
+		print(", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
 		throw("stack barrier lost")
 	}
 	*lrPtr = uintreg(stkbar.savedLRVal)
 }
 
-// gcPrintStkbars prints a []stkbar for debugging.
-func gcPrintStkbars(stkbar []stkbar) {
+// gcPrintStkbars prints the stack barriers of gp for debugging. It
+// places a "@@@" marker at gp.stkbarPos. If marker >= 0, it will also
+// place a "==>" marker before the marker'th entry.
+func gcPrintStkbars(gp *g, marker int) {
 	print("[")
-	for i, s := range stkbar {
+	for i, s := range gp.stkbar {
 		if i > 0 {
 			print(" ")
 		}
+		if i == int(gp.stkbarPos) {
+			print("@@@ ")
+		}
+		if i == marker {
+			print("==> ")
+		}
 		print("*", hex(s.savedLRPtr), "=", hex(s.savedLRVal))
 	}
+	if int(gp.stkbarPos) == len(gp.stkbar) {
+		print(" @@@")
+	}
+	if marker == len(gp.stkbar) {
+		print(" ==>")
+	}
 	print("]")
 }
 
@@ -617,6 +650,7 @@ func gcPrintStkbars(stkbar []stkbar) {
 //
 //go:nosplit
 func gcUnwindBarriers(gp *g, sp uintptr) {
+	gcLockStackBarriers(gp)
 	// On LR machines, if there is a stack barrier on the return
 	// from the frame containing sp, this will mark it as hit even
 	// though it isn't, but it's okay to be conservative.
@@ -625,9 +659,12 @@ func gcUnwindBarriers(gp *g, sp uintptr) {
 		gcRemoveStackBarrier(gp, gp.stkbar[gp.stkbarPos])
 		gp.stkbarPos++
 	}
+	gcUnlockStackBarriers(gp)
 	if debugStackBarrier && gp.stkbarPos != before {
 		print("skip barriers below ", hex(sp), " in goid=", gp.goid, ": ")
-		gcPrintStkbars(gp.stkbar[before:gp.stkbarPos])
+		// We skipped barriers between the "==>" marker
+		// (before) and the "@@@" marker (gp.stkbarPos).
+		gcPrintStkbars(gp, int(before))
 		print("\n")
 	}
 }
@@ -648,6 +685,28 @@ func setNextBarrierPC(pc uintptr) {
 	gp.stkbar[gp.stkbarPos].savedLRVal = pc
 }
 
+// gcLockStackBarriers synchronizes with tracebacks of gp's stack
+// during sigprof for installation or removal of stack barriers. It
+// blocks until any current sigprof is done tracebacking gp's stack
+// and then disallows profiling tracebacks of gp's stack.
+//
+// This is necessary because a sigprof during barrier installation or
+// removal could observe inconsistencies between the stkbar array and
+// the stack itself and crash.
+func gcLockStackBarriers(gp *g) {
+	for !cas(&gp.stackLock, 0, 1) {
+		osyield()
+	}
+}
+
+func gcTryLockStackBarriers(gp *g) bool {
+	return cas(&gp.stackLock, 0, 1)
+}
+
+func gcUnlockStackBarriers(gp *g) {
+	atomicstore(&gp.stackLock, 0)
+}
+
 // TODO(austin): Can we consolidate the gcDrain* functions?
 
 // gcDrain scans objects in work buffers, blackening grey
diff --git a/src/runtime/os1_darwin.go b/src/runtime/os1_darwin.go
index e070229..c9dba15 100644
--- a/src/runtime/os1_darwin.go
+++ b/src/runtime/os1_darwin.go
@@ -130,6 +130,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -138,6 +139,17 @@ func msigsave(mp *m) {
 	sigprocmask(_SIG_SETMASK, nil, smask)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -156,10 +168,8 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
-	_g_ := getg()
-	smask := (*uint32)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask, nil)
 	signalstack(nil)
 }
 
@@ -459,6 +469,7 @@ func getsig(i int32) uintptr {
 	return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st stackt
 	if s == nil {
diff --git a/src/runtime/os1_dragonfly.go b/src/runtime/os1_dragonfly.go
index f96c78c..da70014 100644
--- a/src/runtime/os1_dragonfly.go
+++ b/src/runtime/os1_dragonfly.go
@@ -119,6 +119,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -127,6 +128,17 @@ func msigsave(mp *m) {
 	sigprocmask(_SIG_SETMASK, nil, smask)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -150,9 +162,6 @@ func minit() {
 
 // Called from dropm to undo the effect of an minit.
 func unminit() {
-	_g_ := getg()
-	smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask, nil)
 	signalstack(nil)
 }
 
@@ -222,6 +231,7 @@ func getsig(i int32) uintptr {
 	return sa.sa_sigaction
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st sigaltstackt
 	if s == nil {
diff --git a/src/runtime/os1_freebsd.go b/src/runtime/os1_freebsd.go
index f3519f3..b18e60f 100644
--- a/src/runtime/os1_freebsd.go
+++ b/src/runtime/os1_freebsd.go
@@ -118,6 +118,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -126,6 +127,17 @@ func msigsave(mp *m) {
 	sigprocmask(_SIG_SETMASK, nil, smask)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -151,10 +163,8 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
-	_g_ := getg()
-	smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask, nil)
 	signalstack(nil)
 }
 
@@ -224,6 +234,7 @@ func getsig(i int32) uintptr {
 	return sa.sa_handler
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st stackt
 	if s == nil {
diff --git a/src/runtime/os1_linux.go b/src/runtime/os1_linux.go
index c23dc30..166014b 100644
--- a/src/runtime/os1_linux.go
+++ b/src/runtime/os1_linux.go
@@ -198,6 +198,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -206,6 +207,17 @@ func msigsave(mp *m) {
 	rtsigprocmask(_SIG_SETMASK, nil, smask, int32(unsafe.Sizeof(*smask)))
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+	rtsigprocmask(_SIG_SETMASK, smask, nil, int32(unsafe.Sizeof(*smask)))
+}
+
+//go:nosplit
+func sigblock() {
+	rtsigprocmask(_SIG_SETMASK, &sigset_all, nil, int32(unsafe.Sizeof(sigset_all)))
+}
+
 func gettid() uint32
 
 // Called to initialize a new m (including the bootstrap m).
@@ -229,10 +241,8 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
-	_g_ := getg()
-	smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
-	rtsigprocmask(_SIG_SETMASK, smask, nil, int32(unsafe.Sizeof(*smask)))
 	signalstack(nil)
 }
 
@@ -293,7 +303,8 @@ func setsig(i int32, fn uintptr, restart bool) {
 		fn = funcPC(sigtramp)
 	}
 	sa.sa_handler = fn
-	if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
+	// Qemu rejects rt_sigaction of SIGRTMAX (64).
+	if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 && i != 64 {
 		throw("rt_sigaction failure")
 	}
 }
@@ -325,6 +336,7 @@ func getsig(i int32) uintptr {
 	return sa.sa_handler
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st sigaltstackt
 	if s == nil {
diff --git a/src/runtime/os1_nacl.go b/src/runtime/os1_nacl.go
index 143752a..30c3529 100644
--- a/src/runtime/os1_nacl.go
+++ b/src/runtime/os1_nacl.go
@@ -15,9 +15,18 @@ func mpreinit(mp *m) {
 
 func sigtramp()
 
+//go:nosplit
 func msigsave(mp *m) {
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+}
+
+//go:nosplit
+func sigblock() {
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
diff --git a/src/runtime/os1_netbsd.go b/src/runtime/os1_netbsd.go
index cacd606..f4c5ca4 100644
--- a/src/runtime/os1_netbsd.go
+++ b/src/runtime/os1_netbsd.go
@@ -138,6 +138,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -146,6 +147,17 @@ func msigsave(mp *m) {
 	sigprocmask(_SIG_SETMASK, nil, smask)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -166,11 +178,8 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
-	_g_ := getg()
-	smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask, nil)
-
 	signalstack(nil)
 }
 
@@ -213,6 +222,7 @@ func getsig(i int32) uintptr {
 	return sa.sa_sigaction
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st sigaltstackt
 	if s == nil {
diff --git a/src/runtime/os1_openbsd.go b/src/runtime/os1_openbsd.go
index 24a095b..88f6aef 100644
--- a/src/runtime/os1_openbsd.go
+++ b/src/runtime/os1_openbsd.go
@@ -148,6 +148,7 @@ func mpreinit(mp *m) {
 	mp.gsignal.m = mp
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -156,6 +157,17 @@ func msigsave(mp *m) {
 	*smask = sigprocmask(_SIG_BLOCK, 0)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := *(*uint32)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, sigset_all)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -178,10 +190,8 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
-	_g_ := getg()
-	smask := *(*uint32)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask)
 	signalstack(nil)
 }
 
@@ -224,6 +234,7 @@ func getsig(i int32) uintptr {
 	return sa.sa_sigaction
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st stackt
 	if s == nil {
diff --git a/src/runtime/os1_plan9.go b/src/runtime/os1_plan9.go
index 9615b6d..38125a0 100644
--- a/src/runtime/os1_plan9.go
+++ b/src/runtime/os1_plan9.go
@@ -21,6 +21,12 @@ func mpreinit(mp *m) {
 func msigsave(mp *m) {
 }
 
+func msigrestore(mp *m) {
+}
+
+func sigblock() {
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
diff --git a/src/runtime/os1_windows.go b/src/runtime/os1_windows.go
index f608b4a..d012034 100644
--- a/src/runtime/os1_windows.go
+++ b/src/runtime/os1_windows.go
@@ -284,9 +284,18 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 func mpreinit(mp *m) {
 }
 
+//go:nosplit
 func msigsave(mp *m) {
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+}
+
+//go:nosplit
+func sigblock() {
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -296,6 +305,7 @@ func minit() {
 }
 
 // Called from dropm to undo the effect of an minit.
+//go:nosplit
 func unminit() {
 	tp := &getg().m.thread
 	stdcall1(_CloseHandle, *tp)
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 792188f..b27a675 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -192,6 +192,7 @@ func mpreinit(mp *m) {
 
 func miniterrno()
 
+//go:nosplit
 func msigsave(mp *m) {
 	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
 	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -200,6 +201,17 @@ func msigsave(mp *m) {
 	sigprocmask(_SIG_SETMASK, nil, smask)
 }
 
+//go:nosplit
+func msigrestore(mp *m) {
+	smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+	sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the new thread, can not allocate memory.
 func minit() {
@@ -220,10 +232,6 @@ func minit() {
 
 // Called from dropm to undo the effect of an minit.
 func unminit() {
-	_g_ := getg()
-	smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
-	sigprocmask(_SIG_SETMASK, smask, nil)
-
 	signalstack(nil)
 }
 
@@ -289,6 +297,7 @@ func getsig(i int32) uintptr {
 	return *((*uintptr)(unsafe.Pointer(&sa._funcptr)))
 }
 
+//go:nosplit
 func signalstack(s *stack) {
 	var st sigaltstackt
 	if s == nil {
@@ -493,6 +502,7 @@ func sigaltstack(ss *sigaltstackt, oss *sigaltstackt) /* int32 */ {
 	sysvicall2(&libc_sigaltstack, uintptr(unsafe.Pointer(ss)), uintptr(unsafe.Pointer(oss)))
 }
 
+//go:nosplit
 func sigprocmask(how int32, set *sigset, oset *sigset) /* int32 */ {
 	sysvicall3(&libc_sigprocmask, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oset)))
 }
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index fd20a5c..95269a7 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -15,71 +15,71 @@ func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
 
 //go:nosplit
 func sysvicall0(fn *libcFunc) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 0
 	libcall.args = uintptr(unsafe.Pointer(fn)) // it's unused but must be non-nil, otherwise crashes
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 1
 	// TODO(rsc): Why is noescape necessary here and below?
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 2
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 3
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 4
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 5
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
 
 //go:nosplit
 func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
-	libcall := &getg().m.libcall
+	var libcall libcall
 	libcall.fn = uintptr(unsafe.Pointer(fn))
 	libcall.n = 6
 	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
-	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
 	return libcall.r1
 }
diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go
index 09cb775..72ab524 100644
--- a/src/runtime/proc1.go
+++ b/src/runtime/proc1.go
@@ -414,13 +414,7 @@ func scang(gp *g) {
 			// the goroutine until we're done.
 			if castogscanstatus(gp, s, s|_Gscan) {
 				if !gp.gcscandone {
-					// Coordinate with traceback
-					// in sigprof.
-					for !cas(&gp.stackLock, 0, 1) {
-						osyield()
-					}
 					scanstack(gp)
-					atomicstore(&gp.stackLock, 0)
 					gp.gcscandone = true
 				}
 				restartg(gp)
@@ -951,6 +945,15 @@ func needm(x byte) {
 	mp.needextram = mp.schedlink == 0
 	unlockextra(mp.schedlink.ptr())
 
+	// Save and block signals before installing g.
+	// Once g is installed, any incoming signals will try to execute,
+	// but we won't have the sigaltstack settings and other data
+	// set up appropriately until the end of minit, which will
+	// unblock the signals. This is the same dance as when
+	// starting a new m to run Go code via newosproc.
+	msigsave(mp)
+	sigblock()
+
 	// Install g (= m->g0) and set the stack bounds
 	// to match the current stack. We don't actually know
 	// how big the stack is, like we don't know how big any
@@ -962,7 +965,6 @@ func needm(x byte) {
 	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
 	_g_.stackguard0 = _g_.stack.lo + _StackGuard
 
-	msigsave(mp)
 	// Initialize this thread to use the m.
 	asminit()
 	minit()
@@ -1033,9 +1035,6 @@ func newextram() {
 // We may have to keep the current version on systems with cgo
 // but without pthreads, like Windows.
 func dropm() {
-	// Undo whatever initialization minit did during needm.
-	unminit()
-
 	// Clear m and g, and return m to the extra list.
 	// After the call to setg we can only call nosplit functions
 	// with no pointer manipulation.
@@ -1043,7 +1042,16 @@ func dropm() {
 	mnext := lockextra(true)
 	mp.schedlink.set(mnext)
 
+	// Block signals before unminit.
+	// Unminit unregisters the signal handling stack (but needs g on some systems).
+	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
+	// It's important not to try to handle a signal between those two steps.
+	sigblock()
+	unminit()
 	setg(nil)
+	msigrestore(mp)
+
+	// Commit the release of mp.
 	unlockextra(mp)
 }
 
@@ -2500,11 +2508,6 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 	// Profiling runs concurrently with GC, so it must not allocate.
 	mp.mallocing++
 
-	// Coordinate with stack barrier insertion in scanstack.
-	for !cas(&gp.stackLock, 0, 1) {
-		osyield()
-	}
-
 	// Define that a "user g" is a user-created goroutine, and a "system g"
 	// is one that is m->g0 or m->gsignal.
 	//
@@ -2571,8 +2574,18 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 	// transition. We simply require that g and SP match and that the PC is not
 	// in gogo.
 	traceback := true
+	haveStackLock := false
 	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
 		traceback = false
+	} else if gp.m.curg != nil {
+		if gcTryLockStackBarriers(gp.m.curg) {
+			haveStackLock = true
+		} else {
+			// Stack barriers are being inserted or
+			// removed, so we can't get a consistent
+			// traceback right now.
+			traceback = false
+		}
 	}
 	var stk [maxCPUProfStack]uintptr
 	n := 0
@@ -2582,7 +2595,14 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 		// This is especially important on windows, since all syscalls are cgo calls.
 		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
 	} else if traceback {
-		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+		flags := uint(_TraceTrap | _TraceJumpStack)
+		if gp.m.curg != nil && readgstatus(gp.m.curg) == _Gcopystack {
+			// We can traceback the system stack, but
+			// don't jump to the potentially inconsistent
+			// user stack.
+			flags &^= _TraceJumpStack
+		}
+		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags)
 	}
 	if !traceback || n <= 0 {
 		// Normal traceback is impossible or has failed.
@@ -2608,7 +2628,9 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 			}
 		}
 	}
-	atomicstore(&gp.stackLock, 0)
+	if haveStackLock {
+		gcUnlockStackBarriers(gp.m.curg)
+	}
 
 	if prof.hz != 0 {
 		// Simple cas-lock to coordinate with setcpuprofilerate.
diff --git a/src/runtime/race/testdata/issue12225_test.go b/src/runtime/race/testdata/issue12225_test.go
new file mode 100644
index 0000000..3b0b8ec
--- /dev/null
+++ b/src/runtime/race/testdata/issue12225_test.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+// golang.org/issue/12225
+// The test is that this compiles at all.
+
+func issue12225() {
+	println(*(*int)(unsafe.Pointer(&convert("")[0])))
+	println(*(*int)(unsafe.Pointer(&[]byte("")[0])))
+}
diff --git a/src/runtime/rt0_darwin_arm.s b/src/runtime/rt0_darwin_arm.s
index 95a2b17..d609850 100644
--- a/src/runtime/rt0_darwin_arm.s
+++ b/src/runtime/rt0_darwin_arm.s
@@ -16,27 +16,34 @@ TEXT _rt0_arm_darwin(SB),7,$-4
 //
 // Note that all currently shipping darwin/arm platforms require
 // cgo and do not support c-shared.
-TEXT _rt0_arm_darwin_lib(SB),NOSPLIT,$12
+TEXT _rt0_arm_darwin_lib(SB),NOSPLIT,$0
+	// R11 is REGTMP, reserved for liblink. It is used below to
+	// move R0/R1 into globals. However in the darwin ARMv7 calling
+	// convention, it is a callee-saved register. So we save it to a
+	// temporary register.
+	MOVW  R11, R2
 	MOVW  R0, _rt0_arm_darwin_lib_argc<>(SB)
 	MOVW  R1, _rt0_arm_darwin_lib_argv<>(SB)
 
 	// Create a new thread to do the runtime initialization and return.
-	MOVW  _cgo_sys_thread_create(SB), R4
-	CMP   $0, R4
+	MOVW  _cgo_sys_thread_create(SB), R3
+	CMP   $0, R3
 	B.EQ  nocgo
 	MOVW  $_rt0_arm_darwin_lib_go(SB), R0
 	MOVW  $0, R1
-	BL    (R4)
+	MOVW  R2, R11
+	BL    (R3)
 	RET
 nocgo:
 	MOVW  $0x400000, R0
-	MOVW  $_rt0_arm_darwin_lib_go(SB), R1
-	MOVW  $0, R2
-	MOVW  R0,  (R13) // stacksize
-	MOVW  R1, 4(R13) // fn
-	MOVW  R2, 8(R13) // fnarg
-	MOVW  $runtime·newosproc0(SB), R4
-	BL    (R4)
+	MOVW  R0, (R13) // stacksize
+	MOVW  $_rt0_arm_darwin_lib_go(SB), R0
+	MOVW  R0, 4(R13) // fn
+	MOVW  $0, R0
+	MOVW  R0, 8(R13) // fnarg
+	MOVW  $runtime·newosproc0(SB), R3
+	MOVW  R2, R11
+	BL    (R3)
 	RET
 
 TEXT _rt0_arm_darwin_lib_go(SB),NOSPLIT,$0
diff --git a/src/runtime/signal_linux.go b/src/runtime/signal_linux.go
index 2f25b59..2cc76b2 100644
--- a/src/runtime/signal_linux.go
+++ b/src/runtime/signal_linux.go
@@ -44,8 +44,8 @@ var sigtable = [...]sigTabT{
 	/* 29 */ {_SigNotify, "SIGIO: i/o now possible"},
 	/* 30 */ {_SigNotify, "SIGPWR: power failure restart"},
 	/* 31 */ {_SigNotify, "SIGSYS: bad system call"},
-	/* 32 */ {_SigSetStack, "signal 32"}, /* SIGCANCEL; see issue 6997 */
-	/* 33 */ {_SigSetStack, "signal 33"}, /* SIGSETXID; see issue 3871, 9400 */
+	/* 32 */ {_SigSetStack + _SigUnblock, "signal 32"}, /* SIGCANCEL; see issue 6997 */
+	/* 33 */ {_SigSetStack + _SigUnblock, "signal 33"}, /* SIGSETXID; see issues 3871, 9400, 12498 */
 	/* 34 */ {_SigNotify, "signal 34"},
 	/* 35 */ {_SigNotify, "signal 35"},
 	/* 36 */ {_SigNotify, "signal 36"},
diff --git a/src/runtime/stack1.go b/src/runtime/stack1.go
index efcb5f2..19634ef 100644
--- a/src/runtime/stack1.go
+++ b/src/runtime/stack1.go
@@ -609,6 +609,10 @@ func copystack(gp *g, newsize uintptr) {
 		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
 	}
 
+	// Disallow sigprof scans of this stack and block if there's
+	// one in progress.
+	gcLockStackBarriers(gp)
+
 	// adjust pointers in the to-be-copied frames
 	var adjinfo adjustinfo
 	adjinfo.old = old
@@ -640,6 +644,8 @@ func copystack(gp *g, newsize uintptr) {
 	gp.stackAlloc = newsize
 	gp.stkbar = newstkbar
 
+	gcUnlockStackBarriers(gp)
+
 	// free old stack
 	if stackPoisonCopy != 0 {
 		fillstack(old, 0xfc)
diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s
index e431564..18098d2 100644
--- a/src/runtime/sys_solaris_amd64.s
+++ b/src/runtime/sys_solaris_amd64.s
@@ -80,6 +80,8 @@ TEXT runtime·asmsysvicall6(SB),NOSPLIT,$0
 
 	get_tls(CX)
 	MOVQ	g(CX), BX
+	CMPQ	BX, $0
+	JEQ	skiperrno1
 	MOVQ	g_m(BX), BX
 	MOVQ	m_perrno(BX), DX
 	CMPQ	DX, $0
@@ -108,6 +110,8 @@ skipargs:
 
 	get_tls(CX)
 	MOVQ	g(CX), BX
+	CMPQ	BX, $0
+	JEQ	skiperrno2
 	MOVQ	g_m(BX), BX
 	MOVQ	m_perrno(BX), AX
 	CMPQ	AX, $0
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 2def359..fa13713 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -142,7 +142,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 
 	// Fix up returns to the stack barrier by fetching the
 	// original return PC from gp.stkbar.
-	stkbar := gp.stkbar[gp.stkbarPos:]
+	stkbarG := gp
+	stkbar := stkbarG.stkbar[stkbarG.stkbarPos:]
 
 	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
 		if gp.syscallsp != 0 {
@@ -188,6 +189,34 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 	}
 
 	f := findfunc(frame.pc)
+	if f != nil && f.entry == stackBarrierPC {
+		// We got caught in the middle of a stack barrier
+		// (presumably by a signal), so stkbar may be
+		// inconsistent with the barriers on the stack.
+		// Simulate the completion of the barrier.
+		//
+		// On x86, SP will be exactly one word above
+		// savedLRPtr. On LR machines, SP will be above
+		// savedLRPtr by some frame size.
+		var stkbarPos uintptr
+		if len(stkbar) > 0 && stkbar[0].savedLRPtr < sp0 {
+			// stackBarrier has not incremented stkbarPos.
+			stkbarPos = gp.stkbarPos
+		} else if gp.stkbarPos > 0 && gp.stkbar[gp.stkbarPos-1].savedLRPtr < sp0 {
+			// stackBarrier has incremented stkbarPos.
+			stkbarPos = gp.stkbarPos - 1
+		} else {
+			printlock()
+			print("runtime: failed to unwind through stackBarrier at SP ", hex(sp0), "; ")
+			gcPrintStkbars(gp, int(gp.stkbarPos))
+			print("\n")
+			throw("inconsistent state in stackBarrier")
+		}
+
+		frame.pc = gp.stkbar[stkbarPos].savedLRVal
+		stkbar = gp.stkbar[stkbarPos+1:]
+		f = findfunc(frame.pc)
+	}
 	if f == nil {
 		if callback != nil {
 			print("runtime: unknown pc ", hex(frame.pc), "\n")
@@ -216,7 +245,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 			sp := frame.sp
 			if flags&_TraceJumpStack != 0 && f.entry == systemstackPC && gp == g.m.g0 && gp.m.curg != nil {
 				sp = gp.m.curg.sched.sp
-				stkbar = gp.m.curg.stkbar[gp.m.curg.stkbarPos:]
+				stkbarG = gp.m.curg
+				stkbar = stkbarG.stkbar[stkbarG.stkbarPos:]
 			}
 			frame.fp = sp + uintptr(funcspdelta(f, frame.pc))
 			if !usesLR {
@@ -254,9 +284,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 			}
 			if frame.lr == stackBarrierPC {
 				// Recover original PC.
-				if stkbar[0].savedLRPtr != lrPtr {
+				if len(stkbar) == 0 || stkbar[0].savedLRPtr != lrPtr {
 					print("found next stack barrier at ", hex(lrPtr), "; expected ")
-					gcPrintStkbars(stkbar)
+					gcPrintStkbars(stkbarG, len(stkbarG.stkbar)-len(stkbar))
 					print("\n")
 					throw("missed stack barrier")
 				}
@@ -476,7 +506,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 
 	if callback != nil && n < max && len(stkbar) > 0 {
 		print("runtime: g", gp.goid, ": leftover stack barriers ")
-		gcPrintStkbars(stkbar)
+		gcPrintStkbars(stkbarG, len(stkbarG.stkbar)-len(stkbar))
 		print("\n")
 		throw("traceback has leftover stack barriers")
 	}
diff --git a/test/fixedbugs/issue11987.go b/test/fixedbugs/issue11987.go
new file mode 100644
index 0000000..78fc28b
--- /dev/null
+++ b/test/fixedbugs/issue11987.go
@@ -0,0 +1,23 @@
+// run
+
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 11987. The ppc64 SRADCC instruction was misassembled in a way
+// that lost bit 5 of the immediate, so v>>32 was assembled as v>>0.  SRADCC
+// is only ever inserted by peep so it's hard to be sure when it will
+// be used. This formulation worked when the bug was fixed.
+
+package main
+
+import "fmt"
+
+var v int64 = 0x80000000
+
+func main() {
+	s := fmt.Sprintf("%v", v>>32 == 0)
+	if s != "true" {
+		fmt.Printf("BUG: v>>32 == 0 evaluated as %q\n", s)
+	}
+}
diff --git a/test/fixedbugs/issue12686.go b/test/fixedbugs/issue12686.go
new file mode 100644
index 0000000..5783c99
--- /dev/null
+++ b/test/fixedbugs/issue12686.go
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// golang.org/issue/12686.
+// interesting because it's a non-constant but ideal value
+// and we used to incorrectly attach a constant Val to the Node.
+
+package p
+
+func f(i uint) uint {
+	x := []uint{1 << i}
+	return x[0]
+}
diff --git a/test/fixedbugs/issue13160.go b/test/fixedbugs/issue13160.go
new file mode 100644
index 0000000..7eb4811
--- /dev/null
+++ b/test/fixedbugs/issue13160.go
@@ -0,0 +1,70 @@
+// run
+
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"runtime"
+)
+
+const N = 100000
+
+func main() {
+	// Allocate more Ps than processors.  This raises
+	// the chance that we get interrupted by the OS
+	// in exactly the right (wrong!) place.
+	p := runtime.NumCPU()
+	runtime.GOMAXPROCS(2 * p)
+
+	// Allocate some pointers.
+	ptrs := make([]*int, p)
+	for i := 0; i < p; i++ {
+		ptrs[i] = new(int)
+	}
+
+	// Arena where we read and write pointers like crazy.
+	collider := make([]*int, p)
+
+	done := make(chan struct{}, 2*p)
+
+	// Start writers.  They alternately write a pointer
+	// and nil to a slot in the collider.
+	for i := 0; i < p; i++ {
+		i := i
+		go func() {
+			for j := 0; j < N; j++ {
+				// Write a pointer using memmove.
+				copy(collider[i:i+1], ptrs[i:i+1])
+				// Write nil using memclr.
+				// (This is a magic loop that gets lowered to memclr.)
+				r := collider[i : i+1]
+				for k := range r {
+					r[k] = nil
+				}
+			}
+			done <- struct{}{}
+		}()
+	}
+	// Start readers.  They read pointers from slots
+	// and make sure they are valid.
+	for i := 0; i < p; i++ {
+		i := i
+		go func() {
+			for j := 0; j < N; j++ {
+				var ptr [1]*int
+				copy(ptr[:], collider[i:i+1])
+				if ptr[0] != nil && ptr[0] != ptrs[i] {
+					panic(fmt.Sprintf("bad pointer read %p!", ptr[0]))
+				}
+			}
+			done <- struct{}{}
+		}()
+	}
+	for i := 0; i < 2*p; i++ {
+		<-done
+	}
+}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-golang/golang.git



More information about the pkg-golang-commits mailing list