misc/cgo/testcarchive/carchive_test.go | 20 ++++++++++++--------
src/cmd/compile/internal/noder/stencil.go | 17 +++++++++++------
src/cmd/compile/internal/walk/order.go | 8 +++++++-
src/cmd/go/go_test.go | 9 +++++++++
src/cmd/go/internal/fsys/fsys.go | 89 +++++++++++++++++++++++++++++++++++++++++++----------
src/cmd/go/internal/load/pkg.go | 9 ++++++++-
src/cmd/go/internal/modindex/index_test.go | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++
src/cmd/go/internal/modindex/read.go | 456 +++++++++++++++++++++++++----------------------------
src/cmd/go/internal/modindex/scan.go | 2 ++
src/cmd/go/internal/modindex/write.go | 46 ++++++++++++++++++----------------------------
src/cmd/go/internal/modload/search.go | 13 ++++++-------
src/cmd/go/internal/work/init.go | 6 +++++-
src/cmd/go/script_test.go | 19 ++++++-------------
src/cmd/go/testdata/script/README | 17 ++++++++++++-----
src/cmd/go/testdata/script/build_buildvcs_auto.txt | 4 ++++
src/cmd/go/testdata/script/fsys_walk.txt | 6 ++++++
src/cmd/go/testdata/script/index.txt | 6 ++++++
src/cmd/go/testdata/script/list_permissions.txt => src/cmd/go/testdata/script/list_perm.txt | 3 +--
src/cmd/go/testdata/script/mod_perm.txt | 23 +++++++++++++++++++++++
src/cmd/link/internal/ld/lib.go | 13 +++++++++++++
src/cmd/nm/nm_test.go | 13 ++-----------
src/cmd/trace/main.go | 4 ++--
src/cmd/trace/trace.go | 8 +-------
src/compress/gzip/gunzip.go | 60 ++++++++++++++++++++++++++---------------------------
src/compress/gzip/gunzip_test.go | 16 ++++++++++++++++
src/crypto/x509/parser.go | 22 ++++++++++++++--------
src/crypto/x509/x509.go | 11 +++++++++--
src/crypto/x509/x509_test.go | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++
src/database/sql/sql_test.go | 45 +++++++++++++++------------------------------
src/encoding/gob/decode.go | 19 ++++++++++++-------
src/encoding/gob/gobencdec_test.go | 24 ++++++++++++++++++++++++
src/encoding/xml/read.go | 42 +++++++++++++++++++++++++++---------------
src/encoding/xml/read_test.go | 32 ++++++++++++++++++++++++++++++++
src/go/parser/interface.go | 10 ++++++++--
src/go/parser/parser.go | 54 +++++++++++++++++++++++++++++++++++++++++++++++++----
src/go/parser/parser_test.go | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++++
src/go/parser/resolver.go | 9 +++++++--
src/internal/trace/goroutines.go | 37 ++++++++++++++++++++++++++++---------
src/io/fs/glob.go | 14 ++++++++++++--
src/io/fs/glob_test.go | 10 ++++++++++
src/net/http/fs.go | 1 +
src/net/http/fs_test.go | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++
src/net/http/request.go | 4 ++--
src/net/http/server.go | 22 +++++++++++++++-------
src/net/url/url.go | 2 +-
src/net/url/url_test.go | 10 ++++++++++
src/path/filepath/match.go | 12 +++++++++++-
src/path/filepath/match_test.go | 10 ++++++++++
src/runtime/mcache.go | 36 ++++++++++++++++++++++++++++++------
src/runtime/race/README | 6 +++---
src/runtime/traceback.go | 2 +-
src/syscall/exec_linux.go | 4 ++--
test/fixedbugs/issue53635.go | 31 +++++++++++++++++++++++++++++++
test/run.go | 1 -
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go
index d36b97b70ecc35d74be60afbc084ac7faa3e6e61..c409c317dc7dd8ae75efcb371a885a97f26e63cf 100644
--- a/misc/cgo/testcarchive/carchive_test.go
+++ b/misc/cgo/testcarchive/carchive_test.go
@@ -205,6 +205,7 @@
func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
t.Helper()
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -238,7 +239,7 @@
binArgs := append(cmdToRun(exe), "arg1", "arg2")
cmd = exec.Command(binArgs[0], binArgs[1:]...)
if runtime.Compiler == "gccgo" {
- cmd.Env = append(os.Environ(), "GCCGO=1")
+ cmd.Env = append(cmd.Environ(), "GCCGO=1")
}
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -822,9 +823,15 @@ case "windows", "darwin", "ios", "plan9":
t.Skipf("skipping PIE test on %s", GOOS)
}
+ libgoa := "libgo.a"
+ if runtime.Compiler == "gccgo" {
+ libgoa = "liblibgo.a"
+ }
+
if !testWork {
defer func() {
os.Remove("testp" + exeSuffix)
+ os.Remove(libgoa)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -837,18 +844,13 @@ // GOROOT, and we cannot assume that GOROOT is writable. (A non-root user may
// be running this test in a GOROOT owned by root.)
genHeader(t, "p.h", "./p")
- cmd := exec.Command("go", "install", "-buildmode=c-archive", "./libgo")
+ cmd := exec.Command("go", "build", "-buildmode=c-archive", "./libgo")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
- libgoa := "libgo.a"
- if runtime.Compiler == "gccgo" {
- libgoa = "liblibgo.a"
- }
-
- ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join(libgodir, libgoa))
+ ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", libgoa)
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
@@ -1035,6 +1037,7 @@
buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -1050,6 +1053,7 @@ t.Fatal(err)
}
cmd = exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off")
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index cf2f0b38db3e85c522dee02ace88b70c8cdf2846..1534a1fa49ccfd3b4c475839e340baafa68b7ec0 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -1214,6 +1214,9 @@ m := m.(*ir.SwitchStmt)
if m.Tag != nil && m.Tag.Op() == ir.OTYPESW {
break // Nothing to do here for type switches.
}
+ if m.Tag != nil && !types.IsComparable(m.Tag.Type()) {
+ break // Nothing to do here for un-comparable types.
+ }
if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() {
// To implement a switch on a value that is or has a type parameter, we first convert
// that thing we're switching on to an interface{}.
@@ -1654,12 +1657,14 @@ var nameNode *ir.Name
se := call.X.(*ir.SelectorExpr)
if se.X.Type().IsShape() {
// This is a method call enabled by a type bound.
-
- // We need this extra check for method expressions,
- // which don't add in the implicit XDOTs.
- tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
- tmpse = typecheck.AddImplicitDots(tmpse)
- tparam := tmpse.X.Type()
+ tparam := se.X.Type()
+ if call.X.Op() == ir.ODOTMETH {
+ // We need this extra check for method expressions,
+ // which don't add in the implicit XDOTs.
+ tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
+ tmpse = typecheck.AddImplicitDots(tmpse)
+ tparam = tmpse.X.Type()
+ }
if !tparam.IsShape() {
// The method expression is not
// really on a typeparam.
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 8d1089dcc1f51d72d51f256382e8df2d1f4be9ad..2d1e88238ccfc986c19d4048896a51ee6d90cf7c 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -63,7 +63,7 @@ if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Sym())
ir.DumpList(s, fn.Body)
}
-
+ ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
orderBlock(&fn.Body, map[string][]*ir.Name{})
}
@@ -477,6 +477,12 @@ // orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+ if len(*n) != 0 {
+ // Set reasonable position for instrumenting code. See issue 53688.
+ // It would be nice if ir.Nodes had a position (the opening {, probably),
+ // but it doesn't. So we use the first statement's position instead.
+ ir.SetPos((*n)[0])
+ }
var order orderState
order.free = free
mark := order.markTemp()
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index b39a62f3e4cc1898536b13dbf21ce1aaad12d23c..c100316f478edd0e54901b1baefdd4c41996cbb2 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -1363,6 +1363,15 @@ return "TMPDIR"
}
}
+func pathEnvName() string {
+ switch runtime.GOOS {
+ case "plan9":
+ return "path"
+ default:
+ return "PATH"
+ }
+}
+
func TestDefaultGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index 41d0bbfe660101b904367de19a94e154483b02f2..0d7bef911266b002255216c67dd9d11895a661e4 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -6,16 +6,65 @@ import (
"encoding/json"
"errors"
"fmt"
+ "internal/godebug"
"io/fs"
"io/ioutil"
+ "log"
"os"
+ pathpkg "path"
"path/filepath"
"runtime"
+ "runtime/debug"
"sort"
"strings"
+ "sync"
"time"
)
+// Trace emits a trace event for the operation and file path to the trace log,
+// but only when $GODEBUG contains gofsystrace=1.
+// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
+// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
+// matching that glob pattern (using path.Match) will be followed by a full stack trace.
+func Trace(op, path string) {
+ if !doTrace {
+ return
+ }
+ traceMu.Lock()
+ defer traceMu.Unlock()
+ fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
+ if traceStack != "" {
+ if match, _ := pathpkg.Match(traceStack, path); match {
+ traceFile.Write(debug.Stack())
+ }
+ }
+}
+
+var (
+ doTrace bool
+ traceStack string
+ traceFile *os.File
+ traceMu sync.Mutex
+)
+
+func init() {
+ if godebug.Get("gofsystrace") != "1" {
+ return
+ }
+ doTrace = true
+ traceStack = godebug.Get("gofsystracestack")
+ if f := godebug.Get("gofsystracelog"); f != "" {
+ // Note: No buffering on writes to this file, so no need to worry about closing it at exit.
+ var err error
+ traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ traceFile = os.Stderr
+ }
+}
+
// OverlayFile is the path to a text file in the OverlayJSON format.
// It is the value of the -overlay flag.
var OverlayFile string
@@ -86,6 +135,7 @@ if OverlayFile == "" {
return nil
}
+ Trace("ReadFile", OverlayFile)
b, err := os.ReadFile(OverlayFile)
if err != nil {
return fmt.Errorf("reading overlay file: %v", err)
@@ -191,6 +241,7 @@
// IsDir returns true if path is a directory on disk or in the
// overlay.
func IsDir(path string) (bool, error) {
+ Trace("IsDir", path)
path = canonicalize(path)
if _, ok := parentIsOverlayFile(path); ok {
@@ -260,6 +311,7 @@
// ReadDir provides a slice of fs.FileInfo entries corresponding
// to the overlaid files in the directory.
func ReadDir(dir string) ([]fs.FileInfo, error) {
+ Trace("ReadDir", dir)
dir = canonicalize(dir)
if _, ok := parentIsOverlayFile(dir); ok {
return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
@@ -327,11 +379,17 @@ }
// Open opens the file at or overlaid on the given path.
func Open(path string) (*os.File, error) {
- return OpenFile(path, os.O_RDONLY, 0)
+ Trace("Open", path)
+ return openFile(path, os.O_RDONLY, 0)
}
// OpenFile opens the file at or overlaid on the given path with the flag and perm.
func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+ Trace("OpenFile", path)
+ return openFile(path, flag, perm)
+}
+
+func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
cpath := canonicalize(path)
if node, ok := overlay[cpath]; ok {
// Opening a file in the overlay.
@@ -360,6 +418,7 @@
// IsDirWithGoFiles reports whether dir is a directory containing Go files
// either on disk or in the overlay.
func IsDirWithGoFiles(dir string) (bool, error) {
+ Trace("IsDirWithGoFiles", dir)
fis, err := ReadDir(dir)
if os.IsNotExist(err) || errors.Is(err, errNotDir) {
return false, nil
@@ -405,28 +464,20 @@
// walk recursively descends path, calling walkFn. Copied, with some
// modifications from path/filepath.walk.
func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
- if !info.IsDir() {
- return walkFn(path, info, nil)
+ if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
+ return err
}
- fis, readErr := ReadDir(path)
- walkErr := walkFn(path, info, readErr)
- // If readErr != nil, walk can't walk into this directory.
- // walkErr != nil means walkFn want walk to skip this directory or stop walking.
- // Therefore, if one of readErr and walkErr isn't nil, walk will return.
- if readErr != nil || walkErr != nil {
- // The caller's behavior is controlled by the return value, which is decided
- // by walkFn. walkFn may ignore readErr and return nil.
- // If walkFn returns SkipDir, it will be handled by the caller.
- // So walk should return whatever walkFn returns.
- return walkErr
+ fis, err := ReadDir(path)
+ if err != nil {
+ return walkFn(path, info, err)
}
for _, fi := range fis {
filename := filepath.Join(path, fi.Name())
- if walkErr = walk(filename, fi, walkFn); walkErr != nil {
- if !fi.IsDir() || walkErr != filepath.SkipDir {
- return walkErr
+ if err := walk(filename, fi, walkFn); err != nil {
+ if !fi.IsDir() || err != filepath.SkipDir {
+ return err
}
}
}
@@ -436,6 +487,7 @@
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root.
func Walk(root string, walkFn filepath.WalkFunc) error {
+ Trace("Walk", root)
info, err := Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
@@ -450,11 +502,13 @@ }
// lstat implements a version of os.Lstat that operates on the overlay filesystem.
func Lstat(path string) (fs.FileInfo, error) {
+ Trace("Lstat", path)
return overlayStat(path, os.Lstat, "lstat")
}
// Stat implements a version of os.Stat that operates on the overlay filesystem.
func Stat(path string) (fs.FileInfo, error) {
+ Trace("Stat", path)
return overlayStat(path, os.Stat, "stat")
}
@@ -528,6 +582,7 @@ func (f fakeDir) Sys() any { return nil }
// Glob is like filepath.Glob but uses the overlay file system.
func Glob(pattern string) (matches []string, err error) {
+ Trace("Glob", pattern)
// Check pattern is well-formed.
if _, err := filepath.Match(pattern, ""); err != nil {
return nil, err
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index fcb72b07b2a6b36c0b06f7983b9feca96aafcabe..046f508545499d95bd187001bcb30a0376f47950 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -877,7 +877,14 @@ var buildMode build.ImportMode
if !cfg.ModulesEnabled {
buildMode = build.ImportComment
}
- if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" {
+ modroot := modload.PackageModRoot(ctx, r.path)
+ if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
+ modroot = cfg.GOROOTsrc
+ if str.HasPathPrefix(r.dir, cfg.GOROOTsrc+string(filepath.Separator)+"cmd") {
+ modroot += string(filepath.Separator) + "cmd"
+ }
+ }
+ if modroot != "" {
if rp, err := modindex.GetPackage(modroot, r.dir); err == nil {
data.p, data.err = rp.Import(cfg.BuildContext, buildMode)
goto Happy
diff --git a/src/cmd/go/internal/modindex/index_test.go b/src/cmd/go/internal/modindex/index_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c072f909d3b64163b98b38d4cd617311516cb2a
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_test.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "go/build"
+ "internal/diff"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+func init() {
+ isTest = true
+ enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
+}
+
+func TestIndex(t *testing.T) {
+ src := filepath.Join(runtime.GOROOT(), "src")
+ checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
+ p := m.Package(pkg)
+ bp, err := p.Import(build.Default, build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bp1, err := build.Default.Import(pkg, filepath.Join(src, pkg), build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(bp, bp1) {
+ t.Errorf("mismatch")
+ t.Logf("index:\n%s", hex.Dump(data))
+
+ js, err := json.MarshalIndent(bp, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ js1, err := json.MarshalIndent(bp1, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
+ t.FailNow()
+ }
+ }
+
+ // Check packages in increasing complexity, one at a time.
+ pkgs := []string{
+ "crypto",
+ "encoding",
+ "unsafe",
+ "encoding/json",
+ "runtime",
+ "net",
+ }
+ var raws []*rawPackage
+ for _, pkg := range pkgs {
+ raw := importRaw(src, pkg)
+ raws = append(raws, raw)
+ t.Run(pkg, func(t *testing.T) {
+ data := encodeModuleBytes([]*rawPackage{raw})
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkPkg(t, m, pkg, data)
+ })
+ }
+
+ // Check that a multi-package index works too.
+ t.Run("all", func(t *testing.T) {
+ data := encodeModuleBytes(raws)
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, pkg := range pkgs {
+ checkPkg(t, m, pkg, data)
+ }
+ })
+}
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
index 7ee4669e677317a5076304794b35d3de23b27329..38ddfec70fb53d7dde07be097ee3e72f854fa040 100644
--- a/src/cmd/go/internal/modindex/read.go
+++ b/src/cmd/go/internal/modindex/read.go
@@ -15,7 +15,6 @@ "go/token"
"internal/godebug"
"internal/goroot"
"internal/unsafeheader"
- "math"
"path"
"path/filepath"
"runtime"
@@ -45,10 +44,9 @@ // Module represents and encoded module index file. It is used to
// do the equivalent of build.Import of packages in the module and answer other
// questions based on the index file's data.
type Module struct {
- modroot string
- od offsetDecoder
- packages map[string]int // offsets of each package
- packagePaths []string // paths to package directories relative to modroot; these are the keys of packages
+ modroot string
+ d *decoder
+ n int // number of packages
}
// moduleHash returns an ActionID corresponding to the state of the module
@@ -179,6 +177,7 @@ mi *Module
err error
}
r := mcache.Do(modroot, func() any {
+ fsys.Trace("openIndexModule", modroot)
id, err := moduleHash(modroot, ismodcache)
if err != nil {
return result{nil, err}
@@ -212,6 +211,7 @@ pkg *IndexPackage
err error
}
r := pcache.Do([2]string{modroot, pkgdir}, func() any {
+ fsys.Trace("openIndexPackage", pkgdir)
id, err := dirHash(modroot, pkgdir)
if err != nil {
return result{nil, err}
@@ -234,12 +234,30 @@ }).(result)
return r.pkg, r.err
}
-// fromBytes returns a *Module given the encoded representation.
-func fromBytes(moddir string, data []byte) (mi *Module, err error) {
- if !enabled {
- panic("use of index")
- }
+var errCorrupt = errors.New("corrupt index")
+
+// protect marks the start of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// It should not be used for trivial accesses which would be
+// dwarfed by the overhead of the defer.
+func protect() bool {
+ return debug.SetPanicOnFault(true)
+}
+
+var isTest = false
+// unprotect marks the end of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// unprotect looks for panics due to errCorrupt or bad mmap accesses.
+// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
+// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
+func unprotect(old bool, errp *error) {
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources.
@@ -247,97 +265,100 @@ type addrer interface {
Addr() uintptr
}
- // set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
- // in case it's mmapped (the common case).
- old := debug.SetPanicOnFault(true)
- defer func() {
- debug.SetPanicOnFault(old)
- if e := recover(); e != nil {
- if _, ok := e.(addrer); ok {
- // This panic was almost certainly caused by SetPanicOnFault.
- err = fmt.Errorf("error reading module index: %v", e)
+ debug.SetPanicOnFault(old)
+
+ if e := recover(); e != nil {
+ if _, ok := e.(addrer); ok || e == errCorrupt {
+ // This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
+ err := fmt.Errorf("error reading module index: %v", e)
+ if errp != nil {
+ *errp = err
return
}
- // The panic was likely not caused by SetPanicOnFault.
- panic(e)
+ if isTest {
+ panic(err)
+ }
+ base.Fatalf("%v", err)
}
- }()
+ // The panic was likely not caused by SetPanicOnFault.
+ panic(e)
+ }
+}
- gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
- if string(gotVersion) != indexVersion {
- return nil, fmt.Errorf("bad index version string: %q", gotVersion)
+// fromBytes returns a *Module given the encoded representation.
+func fromBytes(moddir string, data []byte) (m *Module, err error) {
+ if !enabled {
+ panic("use of index")
}
- stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
- st := newStringTable(data[stringTableOffset:])
- d := decoder{unread, st}
- numPackages := d.int()
+
+ defer unprotect(protect(), &err)
+
+ if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
+ return nil, errCorrupt
+ }
- packagePaths := make([]string, numPackages)
- for i := range packagePaths {
- packagePaths[i] = d.string()
+ const hdr = len(indexVersion + "\n")
+ d := &decoder{data: data}
+ str := d.intAt(hdr)
+ if str < hdr+8 || len(d.data) < str {
+ return nil, errCorrupt
}
- packageOffsets := make([]int, numPackages)
- for i := range packageOffsets {
- packageOffsets[i] = d.int()
+ d.data, d.str = data[:str], d.data[str:]
+ // Check that string table looks valid.
+ // First string is empty string (length 0),
+ // and we leave a marker byte 0xFF at the end
+ // just to make sure that the file is not truncated.
+ if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
+ return nil, errCorrupt
}
- packages := make(map[string]int, numPackages)
- for i := range packagePaths {
- packages[packagePaths[i]] = packageOffsets[i]
+
+ n := d.intAt(hdr + 4)
+ if n < 0 || n > (len(d.data)-8)/8 {
+ return nil, errCorrupt
}
- return &Module{
+ m = &Module{
moddir,
- offsetDecoder{data, st},
- packages,
- packagePaths,
- }, nil
+ d,
+ n,
+ }
+ return m, nil
}
// packageFromBytes returns a *IndexPackage given the encoded representation.
func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
- if !enabled {
- panic("use of package index when not enabled")
+ m, err := fromBytes(modroot, data)
+ if err != nil {
+ return nil, err
}
-
- // SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
- // that all its errors satisfy this interface, we'll only check for these errors so that
- // we don't suppress panics that could have been produced from other sources.
- type addrer interface {
- Addr() uintptr
+ if m.n != 1 {
+ return nil, fmt.Errorf("corrupt single-package index")
}
+ return m.pkg(0), nil
+}
- // set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
- // in case it's mmapped (the common case).
- old := debug.SetPanicOnFault(true)
- defer func() {
- debug.SetPanicOnFault(old)
- if e := recover(); e != nil {
- if _, ok := e.(addrer); ok {
- // This panic was almost certainly caused by SetPanicOnFault.
- err = fmt.Errorf("error reading module index: %v", e)
- return
- }
- // The panic was likely not caused by SetPanicOnFault.
- panic(e)
- }
- }()
+// pkgDir returns the dir string of the i'th package in the index.
+func (m *Module) pkgDir(i int) string {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
+ }
+ return m.d.stringAt(12 + 8 + 8*i)
+}
- gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
- if string(gotVersion) != indexVersion {
- return nil, fmt.Errorf("bad index version string: %q", gotVersion)
+// pkgOff returns the offset of the data for the i'th package in the index.
+func (m *Module) pkgOff(i int) int {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
}
- stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
- st := newStringTable(data[stringTableOffset:])
- d := &decoder{unread, st}
- p = decodePackage(d, offsetDecoder{data, st})
- p.modroot = modroot
- return p, nil
+ return m.d.intAt(12 + 8 + 8*i + 4)
}
-// Returns a list of directory paths, relative to the modroot, for
-// packages contained in the module index.
-func (mi *Module) Packages() []string {
- return mi.packagePaths
+// Walk calls f for each package in the index, passing the path to that package relative to the module root.
+func (m *Module) Walk(f func(path string)) {
+ defer unprotect(protect(), nil)
+ for i := 0; i < m.n; i++ {
+ f(m.pkgDir(i))
+ }
}
// relPath returns the path relative to the module's root.
@@ -347,11 +368,7 @@ }
// Import is the equivalent of build.Import given the information in Module.
func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("error reading module index: %v", e)
- }
- }()
+ defer unprotect(protect(), &err)
ctxt := (*Context)(&bctxt)
@@ -792,46 +809,44 @@ }
var errCannotFindPackage = errors.New("cannot find package")
-// Package returns an IndexPackage constructed using the information in the Module.
-func (mi *Module) Package(path string) *IndexPackage {
- defer func() {
- if e := recover(); e != nil {
- base.Fatalf("error reading module index: %v", e)
- }
- }()
- offset, ok := mi.packages[path]
+// Package finds and returns the package with the given path (relative to the module root).
+// If the package does not exist, Package returns an IndexPackage that will return an
+// appropriate error from its methods.
+func (m *Module) Package(path string) *IndexPackage {
+ defer unprotect(protect(), nil)
+
+ i, ok := sort.Find(m.n, func(i int) int {
+ return strings.Compare(path, m.pkgDir(i))
+ })
if !ok {
- return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
+ return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
}
-
- // TODO(matloob): do we want to lock on the module index?
- d := mi.od.decoderAt(offset)
- p := decodePackage(d, mi.od)
- p.modroot = mi.modroot
- return p
+ return m.pkg(i)
}
-func decodePackage(d *decoder, od offsetDecoder) *IndexPackage {
- rp := new(IndexPackage)
- if errstr := d.string(); errstr != "" {
- rp.error = errors.New(errstr)
+// pkg returns the i'th IndexPackage in m.
+func (m *Module) pkg(i int) *IndexPackage {
+ r := m.d.readAt(m.pkgOff(i))
+ p := new(IndexPackage)
+ if errstr := r.string(); errstr != "" {
+ p.error = errors.New(errstr)
}
- rp.dir = d.string()
- numSourceFiles := d.uint32()
- rp.sourceFiles = make([]*sourceFile, numSourceFiles)
- for i := uint32(0); i < numSourceFiles; i++ {
- offset := d.uint32()
- rp.sourceFiles[i] = &sourceFile{
- od: od.offsetDecoderAt(offset),
+ p.dir = r.string()
+ p.sourceFiles = make([]*sourceFile, r.int())
+ for i := range p.sourceFiles {
+ p.sourceFiles[i] = &sourceFile{
+ d: m.d,
+ pos: r.int(),
}
}
- return rp
+ p.modroot = m.modroot
+ return p
}
// sourceFile represents the information of a given source file in the module index.
type sourceFile struct {
- od offsetDecoder // od interprets all offsets relative to the start of the source file's data
-
+ d *decoder // encoding of this source file
+ pos int // start of sourceFile encoding in d
onceReadImports sync.Once
savedImports []rawImport // saved imports so that they're only read once
}
@@ -851,73 +866,67 @@ sourceFileNumPlusBuildConstraints
)
func (sf *sourceFile) error() string {
- return sf.od.stringAt(sourceFileError)
+ return sf.d.stringAt(sf.pos + sourceFileError)
}
func (sf *sourceFile) parseError() string {
- return sf.od.stringAt(sourceFileParseError)
+ return sf.d.stringAt(sf.pos + sourceFileParseError)
}
func (sf *sourceFile) synopsis() string {
- return sf.od.stringAt(sourceFileSynopsis)
+ return sf.d.stringAt(sf.pos + sourceFileSynopsis)
}
func (sf *sourceFile) name() string {
- return sf.od.stringAt(sourceFileName)
+ return sf.d.stringAt(sf.pos + sourceFileName)
}
func (sf *sourceFile) pkgName() string {
- return sf.od.stringAt(sourceFilePkgName)
+ return sf.d.stringAt(sf.pos + sourceFilePkgName)
}
func (sf *sourceFile) ignoreFile() bool {
- return sf.od.boolAt(sourceFileIgnoreFile)
+ return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
}
func (sf *sourceFile) binaryOnly() bool {
- return sf.od.boolAt(sourceFileBinaryOnly)
+ return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
}
func (sf *sourceFile) cgoDirectives() string {
- return sf.od.stringAt(sourceFileCgoDirectives)
+ return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
}
func (sf *sourceFile) goBuildConstraint() string {
- return sf.od.stringAt(sourceFileGoBuildConstraint)
+ return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
}
func (sf *sourceFile) plusBuildConstraints() []string {
- d := sf.od.decoderAt(sourceFileNumPlusBuildConstraints)
- n := d.int()
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ pos += 4
ret := make([]string, n)
for i := 0; i < n; i++ {
- ret[i] = d.string()
+ ret[i] = sf.d.stringAt(pos)
+ pos += 4
}
return ret
}
-func importsOffset(numPlusBuildConstraints int) int {
- // 4 bytes per uin32, add one to advance past numPlusBuildConstraints itself
- return sourceFileNumPlusBuildConstraints + 4*(numPlusBuildConstraints+1)
-}
-
func (sf *sourceFile) importsOffset() int {
- numPlusBuildConstraints := sf.od.intAt(sourceFileNumPlusBuildConstraints)
- return importsOffset(numPlusBuildConstraints)
-}
-
-func embedsOffset(importsOffset, numImports int) int {
- // 4 bytes per uint32; 1 to advance past numImports itself, and 5 uint32s per import
- return importsOffset + 4*(1+(5*numImports))
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ // each build constraint is 1 uint32
+ return pos + 4 + n*4
}
func (sf *sourceFile) embedsOffset() int {
- importsOffset := sf.importsOffset()
- numImports := sf.od.intAt(importsOffset)
- return embedsOffset(importsOffset, numImports)
+ pos := sf.importsOffset()
+ n := sf.d.intAt(pos)
+ // each import is 5 uint32s (string + tokpos)
+ return pos + 4 + n*(4*5)
}
func (sf *sourceFile) imports() []rawImport {
sf.onceReadImports.Do(func() {
importsOffset := sf.importsOffset()
- d := sf.od.decoderAt(importsOffset)
- numImports := d.int()
+ r := sf.d.readAt(importsOffset)
+ numImports := r.int()
ret := make([]rawImport, numImports)
for i := 0; i < numImports; i++ {
- ret[i].path = d.string()
- ret[i].position = d.tokpos()
+ ret[i] = rawImport{r.string(), r.tokpos()}
}
sf.savedImports = ret
})
@@ -926,132 +935,101 @@ }
func (sf *sourceFile) embeds() []embed {
embedsOffset := sf.embedsOffset()
- d := sf.od.decoderAt(embedsOffset)
- numEmbeds := d.int()
+ r := sf.d.readAt(embedsOffset)
+ numEmbeds := r.int()
ret := make([]embed, numEmbeds)
for i := range ret {
- pattern := d.string()
- pos := d.tokpos()
- ret[i] = embed{pattern, pos}
+ ret[i] = embed{r.string(), r.tokpos()}
}
return ret
}
-// A decoder reads from the current position of the file and advances its position as it
-// reads.
-type decoder struct {
- b []byte
- st *stringTable
-}
+func asString(b []byte) string {
+ p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
-func (d *decoder) uint32() uint32 {
- n := binary.LittleEndian.Uint32(d.b[:4])
- d.b = d.b[4:]
- return n
-}
+ var s string
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = p
+ hdr.Len = len(b)
-func (d *decoder) int() int {
- n := d.uint32()
- if int64(n) > math.MaxInt {
- base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
- }
- return int(n)
+ return s
}
-func (d *decoder) tokpos() token.Position {
- file := d.string()
- offset := d.int()
- line := d.int()
- column := d.int()
- return token.Position{
- Filename: file,
- Offset: offset,
- Line: line,
- Column: column,
- }
+// A decoder helps decode the index format.
+type decoder struct {
+ data []byte // data after header
+ str []byte // string table
}
-func (d *decoder) string() string {
- return d.st.string(d.int())
+// intAt returns the int at the given offset in d.data.
+func (d *decoder) intAt(off int) int {
+ if off < 0 || len(d.data)-off < 4 {
+ panic(errCorrupt)
+ }
+ i := binary.LittleEndian.Uint32(d.data[off : off+4])
+ if int32(i)>>31 != 0 {
+ panic(errCorrupt)
+ }
+ return int(i)
}
-// And offset decoder reads information offset from its position in the file.
-// It's either offset from the beginning of the index, or the beginning of a sourceFile's data.
-type offsetDecoder struct {
- b []byte
- st *stringTable
+// boolAt returns the bool at the given offset in d.data.
+func (d *decoder) boolAt(off int) bool {
+ return d.intAt(off) != 0
}
-func (od *offsetDecoder) uint32At(offset int) uint32 {
- if offset > len(od.b) {
- base.Fatalf("go: trying to read from index file at offset higher than file length. This indicates a corrupt offset file in the cache.")
- }
- return binary.LittleEndian.Uint32(od.b[offset:])
+// stringAt returns the string pointed at by the int at the given offset in d.data.
+func (d *decoder) stringAt(off int) string {
+ return d.stringTableAt(d.intAt(off))
}
-func (od *offsetDecoder) intAt(offset int) int {
- n := od.uint32At(offset)
- if int64(n) > math.MaxInt {
- base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
+// stringTableAt returns the string at the given offset in the string table d.str.
+func (d *decoder) stringTableAt(off int) string {
+ if off < 0 || off >= len(d.str) {
+ panic(errCorrupt)
}
- return int(n)
-}
-
-func (od *offsetDecoder) boolAt(offset int) bool {
- switch v := od.uint32At(offset); v {
- case 0:
- return false
- case 1:
- return true
- default:
- base.Fatalf("go: invalid bool value in index file encoding: %v", v)
+ s := d.str[off:]
+ v, n := binary.Uvarint(s)
+ if n <= 0 || v > uint64(len(s[n:])) {
+ panic(errCorrupt)
}
- panic("unreachable")
+ return asString(s[n : n+int(v)])
}
-func (od *offsetDecoder) stringAt(offset int) string {
- return od.st.string(od.intAt(offset))
+// A reader reads sequential fields from a section of the index format.
+type reader struct {
+ d *decoder
+ pos int
}
-func (od *offsetDecoder) decoderAt(offset int) *decoder {
- return &decoder{od.b[offset:], od.st}
+// readAt returns a reader starting at the given position in d.
+func (d *decoder) readAt(pos int) *reader {
+ return &reader{d, pos}
}
-func (od *offsetDecoder) offsetDecoderAt(offset uint32) offsetDecoder {
- return offsetDecoder{od.b[offset:], od.st}
+// int reads the next int.
+func (r *reader) int() int {
+ i := r.d.intAt(r.pos)
+ r.pos += 4
+ return i
}
-type stringTable struct {
- b []byte
+// string reads the next string.
+func (r *reader) string() string {
+ return r.d.stringTableAt(r.int())
}
-func newStringTable(b []byte) *stringTable {
- return &stringTable{b: b}
+// bool reads the next bool.
+func (r *reader) bool() bool {
+ return r.int() != 0
}
-func (st *stringTable) string(pos int) string {
- if pos == 0 {
- return ""
- }
-
- bb := st.b[pos:]
- i := bytes.IndexByte(bb, 0)
-
- if i == -1 {
- panic("reached end of string table trying to read string")
+// tokpos reads the next token.Position.
+func (r *reader) tokpos() token.Position {
+ return token.Position{
+ Filename: r.string(),
+ Offset: r.int(),
+ Line: r.int(),
+ Column: r.int(),
}
- s := asString(bb[:i])
-
- return s
-}
-
-func asString(b []byte) string {
- p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
-
- var s string
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = p
- hdr.Len = len(b)
-
- return s
}
diff --git a/src/cmd/go/internal/modindex/scan.go b/src/cmd/go/internal/modindex/scan.go
index 1ba7c0cad1d53464f6bf2474e38370ad43bd34a8..d3f059bcfc6a9534009e379d247e064d6cde8046 100644
--- a/src/cmd/go/internal/modindex/scan.go
+++ b/src/cmd/go/internal/modindex/scan.go
@@ -46,6 +46,7 @@ // indexModule indexes the module at the given directory and returns its
// encoded representation. It returns ErrNotIndexed if the module can't
// be indexed because it contains symlinks.
func indexModule(modroot string) ([]byte, error) {
+ fsys.Trace("indexModule", modroot)
var packages []*rawPackage
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if err := moduleWalkErr(modroot, path, info, err); err != nil {
@@ -72,6 +73,7 @@ // indexModule indexes the package at the given directory and returns its
// encoded representation. It returns ErrNotIndexed if the package can't
// be indexed.
func indexPackage(modroot, pkgdir string) []byte {
+ fsys.Trace("indexPackage", pkgdir)
p := importRaw(modroot, relPath(pkgdir, modroot))
return encodePackageBytes(p)
}
diff --git a/src/cmd/go/internal/modindex/write.go b/src/cmd/go/internal/modindex/write.go
index 3408248bd9e6cd216ca00409944e37662e26abcc..7db1fb087052c7d353f6fa8aaf7e2b8936f5bdbb 100644
--- a/src/cmd/go/internal/modindex/write.go
+++ b/src/cmd/go/internal/modindex/write.go
@@ -1,54 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package modindex
import (
"cmd/go/internal/base"
"encoding/binary"
"go/token"
- "math"
"sort"
- "strings"
)
-const indexVersion = "go index v0"
+const indexVersion = "go index v1" // 11 bytes (plus \n), to align uint32s in index
// encodeModuleBytes produces the encoded representation of the module index.
// encodeModuleBytes may modify the packages slice.
func encodeModuleBytes(packages []*rawPackage) []byte {
e := newEncoder()
- e.Bytes([]byte(indexVersion))
- e.Bytes([]byte{'\n'})
+ e.Bytes([]byte(indexVersion + "\n"))
stringTableOffsetPos := e.Pos() // fill this at the end
e.Uint32(0) // string table offset
- e.Int(len(packages))
sort.Slice(packages, func(i, j int) bool {
return packages[i].dir < packages[j].dir
})
+ e.Int(len(packages))
+ packagesPos := e.Pos()
for _, p := range packages {
e.String(p.dir)
- }
- packagesOffsetPos := e.Pos()
- for range packages {
e.Int(0)
}
for i, p := range packages {
- e.IntAt(e.Pos(), packagesOffsetPos+4*i)
+ e.IntAt(e.Pos(), packagesPos+8*i+4)
encodePackage(e, p)
}
e.IntAt(e.Pos(), stringTableOffsetPos)
e.Bytes(e.stringTable)
+ e.Bytes([]byte{0xFF}) // end of string table marker
return e.b
}
func encodePackageBytes(p *rawPackage) []byte {
- e := newEncoder()
- e.Bytes([]byte(indexVersion))
- e.Bytes([]byte{'\n'})
- stringTableOffsetPos := e.Pos() // fill this at the end
- e.Uint32(0) // string table offset
- encodePackage(e, p)
- e.IntAt(e.Pos(), stringTableOffsetPos)
- e.Bytes(e.stringTable)
- return e.b
+ return encodeModuleBytes([]*rawPackage{p})
}
func encodePackage(e *encoder, p *rawPackage) {
@@ -126,9 +118,6 @@ e.b = append(e.b, b...)
}
func (e *encoder) String(s string) {
- if strings.IndexByte(s, 0) >= 0 {
- base.Fatalf("go: attempting to encode a string containing a null byte")
- }
if n, ok := e.strings[s]; ok {
e.Int(n)
return
@@ -136,8 +125,8 @@ }
pos := len(e.stringTable)
e.strings[s] = pos
e.Int(pos)
+ e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
e.stringTable = append(e.stringTable, []byte(s)...)
- e.stringTable = append(e.stringTable, 0)
}
func (e *encoder) Bool(b bool) {
@@ -152,17 +141,18 @@ func (e *encoder) Uint32(n uint32) {
e.b = binary.LittleEndian.AppendUint32(e.b, n)
}
-// Int encodes n. Note that all ints are written to the index as uint32s.
+// Int encodes n. Note that all ints are written to the index as uint32s,
+// and to avoid problems on 32-bit systems we require fitting into a 32-bit int.
func (e *encoder) Int(n int) {
- if n < 0 || int64(n) > math.MaxUint32 {
- base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
}
e.Uint32(uint32(n))
}
func (e *encoder) IntAt(n int, at int) {
- if n < 0 || int64(n) > math.MaxUint32 {
- base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
}
binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
}
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
index 856390a0f25511c2ff0fd58679017c4db0ce1300..b2ac7f22b1ea9a3cb2e1f22c1663879ac5f42dc0 100644
--- a/src/cmd/go/internal/modload/search.go
+++ b/src/cmd/go/internal/modload/search.go
@@ -216,21 +216,20 @@ // walkFromIndex matches packages in a module using the module index. modroot
// is the module's root directory on disk, index is the modindex.Module for the
// module, and importPathRoot is the module's path prefix.
func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
-loopPackages:
- for _, reldir := range index.Packages() {
+ index.Walk(func(reldir string) {
// Avoid .foo, _foo, and testdata subdirectory trees.
p := reldir
for {
elem, rest, found := strings.Cut(p, string(filepath.Separator))
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
- continue loopPackages
+ return
}
if found && elem == "vendor" {
// Ignore this path if it contains the element "vendor" anywhere
// except for the last element (packages named vendor are allowed
// for historical reasons). Note that found is true when this
// isn't the last path element.
- continue loopPackages
+ return
}
if !found {
// Didn't find the separator, so we're considering the last element.
@@ -241,12 +240,12 @@ }
// Don't use GOROOT/src.
if reldir == "" && importPathRoot == "" {
- continue
+ return
}
name := path.Join(importPathRoot, filepath.ToSlash(reldir))
if !treeCanMatch(name) {
- continue
+ return
}
if !have[name] {
@@ -257,7 +256,7 @@ addPkg(name)
}
}
}
- }
+ })
}
// MatchInModule identifies the packages matching the given pattern within the
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index 5bf548db32a297a03c9a29e1eb2e4e692d1df02d..255ff3a0c50d84c0016a63929e8385c3f4260694 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -211,7 +211,11 @@ case "android":
codegenArg = "-shared"
ldBuildmode = "pie"
case "windows":
- ldBuildmode = "pie"
+ if cfg.BuildRace {
+ ldBuildmode = "exe"
+ } else {
+ ldBuildmode = "pie"
+ }
case "ios":
codegenArg = "-shared"
ldBuildmode = "pie"
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 5e82929f19a5dd4bfe86110f44d8f92ddec551cf..809dfb452f36683ae7b700bb9da045a83141588c 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -163,7 +163,7 @@ ts.check(os.MkdirAll(filepath.Join(ts.workdir, "gopath/src"), 0777))
ts.cd = filepath.Join(ts.workdir, "gopath/src")
ts.env = []string{
"WORK=" + ts.workdir, // must be first for ts.abbrev
- "PATH=" + testBin + string(filepath.ListSeparator) + os.Getenv("PATH"),
+ pathEnvName() + "=" + testBin + string(filepath.ListSeparator) + os.Getenv(pathEnvName()),
homeEnvName() + "=/no-home",
"CCACHE_DISABLE=1", // ccache breaks with non-existent HOME
"GOARCH=" + runtime.GOARCH,
@@ -187,8 +187,6 @@ "PWD=" + ts.cd,
tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"),
"devnull=" + os.DevNull,
"goversion=" + goVersion(ts),
- ":=" + string(os.PathListSeparator),
- "/=" + string(os.PathSeparator),
"CMDGO_TEST_RUN_MAIN=true",
}
if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" {
@@ -203,10 +201,6 @@ if !testenv.HasExternalNetwork() {
ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic")
}
- if runtime.GOOS == "plan9" {
- ts.env = append(ts.env, "path="+testBin+string(filepath.ListSeparator)+os.Getenv("path"))
- }
-
for _, key := range extraEnvKeys {
if val := os.Getenv(key); val != "" {
ts.env = append(ts.env, key+"="+val)
@@ -219,6 +213,10 @@ if i := strings.Index(kv, "="); i >= 0 {
ts.envMap[kv[:i]] = kv[i+1:]
}
}
+ // Add entries for ${:} and ${/} to make it easier to write platform-independent
+ // environment variables.
+ ts.envMap["/"] = string(os.PathSeparator)
+ ts.envMap[":"] = string(os.PathListSeparator)
fmt.Fprintf(&ts.log, "# (%s)\n", time.Now().UTC().Format(time.RFC3339))
ts.mark = ts.log.Len()
@@ -1264,12 +1262,7 @@ return fi.Mode().IsRegular() && fi.Mode().Perm()&0111 != 0
}
}
- pathName := "PATH"
- if runtime.GOOS == "plan9" {
- pathName = "path"
- }
-
- for _, dir := range strings.Split(ts.envMap[pathName], string(filepath.ListSeparator)) {
+ for _, dir := range strings.Split(ts.envMap[pathEnvName()], string(filepath.ListSeparator)) {
if searchExt {
ents, err := os.ReadDir(dir)
if err != nil {
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index c575bff1a517fc3d9db781dbd161633f267ba6a1..e52917684f8c86c4fd9b8192de6e1f9ea2a082a9 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -41,12 +41,19 @@ TMPDIR=$WORK/tmp
GODEBUG=
- Above the event trace for the first logical processor are + Above the event trace for the first logical processor are traces for various runtime-internal events. The "GC" bar shows when the garbage collector is running, and in which stage. diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 1cabc25cedc72901d6af75dca416b4b392f7e9c2..e6c4cca72e00601193727c7d686f18f2ff8467e0 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -571,7 +571,7 @@ } fname := stk[0].Fn info.name = fmt.Sprintf("G%v %s", newG, fname) - info.isSystemG = isSystemGoroutine(fname) + info.isSystemG = trace.IsSystemGoroutine(fname) ctx.gcount++ setGState(ev, newG, gDead, gRunnable) @@ -1127,12 +1127,6 @@ parent.children[frame.PC] = node ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), traceviewer.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id}) } return ctx.buildBranch(node, stk) -} - -func isSystemGoroutine(entryFn string) bool { - // This mimics runtime.isSystemGoroutine as closely as - // possible. - return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.") } // firstTimestamp returns the timestamp of the first event record. diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go index aa6780f8474989664bfcf6746d36fb83e3e403e8..ba8de97e6acba44e142a23955d864f849c3d526a 100644 --- a/src/compress/gzip/gunzip.go +++ b/src/compress/gzip/gunzip.go @@ -248,42 +248,40 @@ if z.err != nil { return 0, z.err } - n, z.err = z.decompressor.Read(p) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) - z.size += uint32(n) - if z.err != io.EOF { - // In the normal case we return here. - return n, z.err - } + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } - // Finished file; check checksum and size. 
- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { - z.err = noEOF(err) - return n, z.err - } - digest := le.Uint32(z.buf[:4]) - size := le.Uint32(z.buf[4:8]) - if digest != z.digest || size != z.size { - z.err = ErrChecksum - return n, z.err - } - z.digest, z.size = 0, 0 + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 - // File is ok; check if there is another. - if !z.multistream { - return n, io.EOF - } - z.err = nil // Remove io.EOF + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF - if _, z.err = z.readHeader(); z.err != nil { - return n, z.err + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } } - // Read from next file, if necessary. - if n > 0 { - return n, nil - } - return z.Read(p) + return n, nil } // Close closes the Reader. It does not close the underlying io.Reader. diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go index be691854639577f1ce01b948884bfd7ea89c549f..3309ff61954480a36482ea46a8b3494849385e29 100644 --- a/src/compress/gzip/gunzip_test.go +++ b/src/compress/gzip/gunzip_test.go @@ -569,3 +569,19 @@ } } } } + +func TestCVE202230631(t *testing.T) { + var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00, + 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + r := bytes.NewReader(bytes.Repeat(empty, 4e6)) + z, err := NewReader(r) + if err != nil { + t.Fatalf("NewReader: got %v, want nil", err) + } + // Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due + // to stack exhaustion. 
+ _, err = z.Read(make([]byte, 10)) + if err != io.EOF { + t.Errorf("Reader.Read: got %v, want %v", err, io.EOF) + } +} diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index e0e8f6125fdca577aa29844444f8b923343280f5..a2d3d809642b221fbde361eabaa86166c5b72c8f 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -1008,22 +1008,22 @@ // we read the SEQUENCE including length and tag bytes so that // we can populate RevocationList.Raw, before unwrapping the // SEQUENCE so it can be operated on if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { - return nil, errors.New("x509: malformed certificate") + return nil, errors.New("x509: malformed crl") } rl.Raw = input if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { - return nil, errors.New("x509: malformed certificate") + return nil, errors.New("x509: malformed crl") } var tbs cryptobyte.String // do the same trick again as above to extract the raw // bytes for Certificate.RawTBSCertificate if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) { - return nil, errors.New("x509: malformed tbs certificate") + return nil, errors.New("x509: malformed tbs crl") } rl.RawTBSRevocationList = tbs if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { - return nil, errors.New("x509: malformed tbs certificate") + return nil, errors.New("x509: malformed tbs crl") } var version int @@ -1106,13 +1106,10 @@ return nil, err } var extensions cryptobyte.String var present bool - if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) { + if !certSeq.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) { return nil, errors.New("x509: malformed extensions") } if present { - if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) { - return nil, errors.New("x509: malformed extensions") - } for !extensions.Empty() { var extension cryptobyte.String if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { @@ -1147,6 +1144,15 @@ } ext, err := 
parseExtension(extension) if err != nil { return nil, err + } + if ext.Id.Equal(oidExtensionAuthorityKeyId) { + rl.AuthorityKeyId = ext.Value + } else if ext.Id.Equal(oidExtensionCRLNumber) { + value := cryptobyte.String(ext.Value) + rl.Number = new(big.Int) + if !value.ReadASN1Integer(rl.Number) { + return nil, errors.New("x509: malformed crl number") + } } rl.Extensions = append(rl.Extensions, ext) } diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 87eb1f7720b69dca653e8504914389e8f0f2342d..950f6d08c8fff9ac70be105b70b3eb5d3f7efd83 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -2109,7 +2109,9 @@ // Issuer contains the DN of the issuing certificate. Issuer pkix.Name // AuthorityKeyId is used to identify the public key associated with the - // issuing certificate. + // issuing certificate. It is populated from the authorityKeyIdentifier + // extension when parsing a CRL. It is ignored when creating a CRL; the + // extension is populated from the issuing certificate itself. AuthorityKeyId []byte Signature []byte @@ -2125,7 +2127,8 @@ RevokedCertificates []pkix.RevokedCertificate // Number is used to populate the X.509 v2 cRLNumber extension in the CRL, // which should be a monotonically increasing sequence number for a given - // CRL scope and CRL issuer. + // CRL scope and CRL issuer. It is also populated from the cRLNumber + // extension when parsing a CRL. 
Number *big.Int // ThisUpdate is used to populate the thisUpdate field in the CRL, which @@ -2192,6 +2195,10 @@ aki, err := asn1.Marshal(authKeyId{Id: issuer.SubjectKeyId}) if err != nil { return nil, err + } + + if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) { + return nil, errors.New("x509: CRL number exceeds 20 octets") } crlNum, err := asn1.Marshal(template.Number) if err != nil { diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index 8ef6115df4c02d144eed9d20abf06d3eded7ae13..cba44f6f8c0f50168db9016329b9be2d68ebea3a 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -2479,6 +2479,40 @@ }, expectedError: "x509: template contains nil Number field", }, { + name: "long Number", + key: ec256Priv, + issuer: &Certificate{ + KeyUsage: KeyUsageCRLSign, + Subject: pkix.Name{ + CommonName: "testing", + }, + SubjectKeyId: []byte{1, 2, 3}, + }, + template: &RevocationList{ + ThisUpdate: time.Time{}.Add(time.Hour * 24), + NextUpdate: time.Time{}.Add(time.Hour * 48), + Number: big.NewInt(0).SetBytes(append([]byte{1}, make([]byte, 20)...)), + }, + expectedError: "x509: CRL number exceeds 20 octets", + }, + { + name: "long Number (20 bytes, MSB set)", + key: ec256Priv, + issuer: &Certificate{ + KeyUsage: KeyUsageCRLSign, + Subject: pkix.Name{ + CommonName: "testing", + }, + SubjectKeyId: []byte{1, 2, 3}, + }, + template: &RevocationList{ + ThisUpdate: time.Time{}.Add(time.Hour * 24), + NextUpdate: time.Time{}.Add(time.Hour * 48), + Number: big.NewInt(0).SetBytes(append([]byte{255}, make([]byte, 19)...)), + }, + expectedError: "x509: CRL number exceeds 20 octets", + }, + { name: "invalid signature algorithm", key: ec256Priv, issuer: &Certificate{ @@ -2517,6 +2551,34 @@ RevokedCertificates: []pkix.RevokedCertificate{ { SerialNumber: big.NewInt(2), RevocationTime: time.Time{}.Add(time.Hour), + }, + }, + Number: big.NewInt(5), + ThisUpdate: time.Time{}.Add(time.Hour * 
24), + NextUpdate: time.Time{}.Add(time.Hour * 48), + }, + }, + { + name: "valid, extra entry extension", + key: ec256Priv, + issuer: &Certificate{ + KeyUsage: KeyUsageCRLSign, + Subject: pkix.Name{ + CommonName: "testing", + }, + SubjectKeyId: []byte{1, 2, 3}, + }, + template: &RevocationList{ + RevokedCertificates: []pkix.RevokedCertificate{ + { + SerialNumber: big.NewInt(2), + RevocationTime: time.Time{}.Add(time.Hour), + Extensions: []pkix.Extension{ + { + Id: []int{2, 5, 29, 99}, + Value: []byte{5, 0}, + }, + }, }, }, Number: big.NewInt(5), @@ -2680,6 +2742,19 @@ } if !reflect.DeepEqual(parsedCRL.Extensions[2:], tc.template.ExtraExtensions) { t.Fatalf("Extensions mismatch: got %v; want %v.", parsedCRL.Extensions[2:], tc.template.ExtraExtensions) + } + + if tc.template.Number != nil && parsedCRL.Number == nil { + t.Fatalf("Generated CRL missing Number: got nil, want %s", + tc.template.Number.String()) + } + if tc.template.Number != nil && tc.template.Number.Cmp(parsedCRL.Number) != 0 { + t.Fatalf("Generated CRL has wrong Number: got %s, want %s", + parsedCRL.Number.String(), tc.template.Number.String()) + } + if !bytes.Equal(parsedCRL.AuthorityKeyId, expectedAKI) { + t.Fatalf("Generated CRL has wrong Number: got %x, want %x", + parsedCRL.AuthorityKeyId, expectedAKI) } }) } diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index 6bc869fc864c26d4ac32cba3aadadf3cd9f12fea..8c58723c03011b7d2036e5eba82c26f4836eb7e3 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -449,6 +449,16 @@ // TestTxContextWait tests the transaction behavior when the tx context is canceled // during execution of the query. func TestTxContextWait(t *testing.T) { + testContextWait(t, false) +} + +// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard +// the final connection. 
+func TestTxContextWaitNoDiscard(t *testing.T) { + testContextWait(t, true) +} + +func testContextWait(t *testing.T, keepConnOnRollback bool) { db := newTestDB(t, "people") defer closeDB(t, db) @@ -458,7 +468,7 @@ tx, err := db.BeginTx(ctx, nil) if err != nil { t.Fatal(err) } - tx.keepConnOnRollback = false + tx.keepConnOnRollback = keepConnOnRollback tx.dc.ci.(*fakeConn).waiter = func(c context.Context) { cancel() @@ -472,36 +482,11 @@ if err != context.Canceled { t.Fatalf("expected QueryContext to error with context canceled but returned %v", err) } - waitForFree(t, db, 0) -} - -// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard -// the final connection. -func TestTxContextWaitNoDiscard(t *testing.T) { - db := newTestDB(t, "people") - defer closeDB(t, db) - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond) - defer cancel() - - tx, err := db.BeginTx(ctx, nil) - if err != nil { - // Guard against the context being canceled before BeginTx completes. - if err == context.DeadlineExceeded { - t.Skip("tx context canceled prior to first use") - } - t.Fatal(err) + if keepConnOnRollback { + waitForFree(t, db, 1) + } else { + waitForFree(t, db, 0) } - - // This will trigger the *fakeConn.Prepare method which will take time - // performing the query. The ctxDriverPrepare func will check the context - // after this and close the rows and return an error. 
- _, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|") - if err != context.DeadlineExceeded { - t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err) - } - - waitForFree(t, db, 1) } // TestUnsupportedOptions checks that the database fails when a driver that diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go index 34f302a5cf54d60e6e5cf9d163d4ad8f6f9c7a6b..eea2924f1ad6274f48fdbe0f5c63bce07a1be41c 100644 --- a/src/encoding/gob/decode.go +++ b/src/encoding/gob/decode.go @@ -871,8 +871,13 @@ } return &op } +var maxIgnoreNestingDepth = 10000 + // decIgnoreOpFor returns the decoding op for a field that has no destination. -func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp { +func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp { + if depth > maxIgnoreNestingDepth { + error_(errors.New("invalid nesting depth")) + } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). // Return the pointer to the op we're already building. 
if opPtr := inProgress[wireId]; opPtr != nil { @@ -896,7 +901,7 @@ case wire == nil: errorf("bad data: undefined type %s", wireId.string()) case wire.ArrayT != nil: elemId := wire.ArrayT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress) + elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len) } @@ -904,15 +909,15 @@ case wire.MapT != nil: keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp := dec.decIgnoreOpFor(keyId, inProgress) - elemOp := dec.decIgnoreOpFor(elemId, inProgress) + keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreMap(state, *keyOp, *elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress) + elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreSlice(state, *elemOp) } @@ -1073,7 +1078,7 @@ // compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded. func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine { engine := new(decEngine) engine.instr = make([]decInstr, 1) // one item - op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp)) + op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0) ovfl := overflow(dec.typeString(remoteId)) engine.instr[0] = decInstr{*op, 0, nil, ovfl} engine.numInstr = 1 @@ -1118,7 +1123,7 @@ // Find the field of the local type with the same name. 
localField, present := srt.FieldByName(wireField.Name) // TODO(r): anonymous names if !present || !isExported(wireField.Name) { - op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp)) + op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0) engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl} continue } diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go index 1d5dde22a4eeb50b490b04272a476e11d6d46371..3d49887c016767805c704a398c37b86fd5520b0c 100644 --- a/src/encoding/gob/gobencdec_test.go +++ b/src/encoding/gob/gobencdec_test.go @@ -12,6 +12,7 @@ "errors" "fmt" "io" "net" + "reflect" "strings" "testing" "time" @@ -796,3 +797,26 @@ if ip.String() != "1.2.3.4" { t.Errorf("decoded to %v, want 1.2.3.4", ip.String()) } } + +func TestIngoreDepthLimit(t *testing.T) { + // We don't test the actual depth limit because it requires building an + // extremely large message, which takes quite a while. + oldNestingDepth := maxIgnoreNestingDepth + maxIgnoreNestingDepth = 100 + defer func() { maxIgnoreNestingDepth = oldNestingDepth }() + b := new(bytes.Buffer) + enc := NewEncoder(b) + typ := reflect.TypeOf(int(0)) + nested := reflect.ArrayOf(1, typ) + for i := 0; i < 100; i++ { + nested = reflect.ArrayOf(1, nested) + } + badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}})) + enc.Encode(badStruct.Interface()) + dec := NewDecoder(b) + var output struct{ Hello int } + expectedErr := "invalid nesting depth" + if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { + t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) + } +} diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index 257591262fc182db45edf044f9eaea73a41d6648..a6fb6654585c645a90457656e58625d90a879834 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -152,7 +152,7 @@ if val.IsNil() { return errors.New("nil pointer passed to 
Unmarshal") } - return d.unmarshal(val.Elem(), start) + return d.unmarshal(val.Elem(), start, 0) } // An UnmarshalError represents an error in the unmarshaling process. @@ -308,8 +308,15 @@ unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() ) +const maxUnmarshalDepth = 10000 + +var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth") + // Unmarshal a single XML element into val. -func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { +func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error { + if depth >= maxUnmarshalDepth { + return errExeceededMaxUnmarshalDepth + } // Find start element if we need it. if start == nil { for { @@ -402,7 +409,7 @@ n := v.Len() v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem()))) // Recur to read element into slice. - if err := d.unmarshal(v.Index(n), start); err != nil { + if err := d.unmarshal(v.Index(n), start, depth+1); err != nil { v.SetLen(n) return err } @@ -525,13 +532,15 @@ switch t := tok.(type) { case StartElement: consumed := false if sv.IsValid() { - consumed, err = d.unmarshalPath(tinfo, sv, nil, &t) + // unmarshalPath can call unmarshal, so we need to pass the depth through so that + // we can continue to enforce the maximum recusion limit. + consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true - if err := d.unmarshal(saveAny, &t); err != nil { + if err := d.unmarshal(saveAny, &t, depth+1); err != nil { return err } } @@ -676,7 +685,7 @@ // paths, and calls unmarshal on them. // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is uninteresting for sv's fields. 
-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { +func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { @@ -691,7 +700,7 @@ } } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. - return true, d.unmarshal(finfo.value(sv, initNilPointers), start) + return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. Break and recurse @@ -720,7 +729,9 @@ return true, err } switch t := tok.(type) { case StartElement: - consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t) + // the recursion depth of unmarshalPath is limited to the path length specified + // by the struct field tag, so we don't increment the depth here. + consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth) if err != nil { return true, err } @@ -736,12 +747,12 @@ } } // Skip reads tokens until it has consumed the end element -// matching the most recent start element already consumed. -// It recurs if it encounters a start element, so it can be used to -// skip nested structures. +// matching the most recent start element already consumed, +// skipping nested structures. // It returns nil if it finds an end element matching the start // element; otherwise it returns an error describing the problem. 
func (d *Decoder) Skip() error { + var depth int64 for { tok, err := d.Token() if err != nil { @@ -749,11 +760,12 @@ return err } switch tok.(type) { case StartElement: - if err := d.Skip(); err != nil { - return err + depth++ + case EndElement: + if depth == 0 { + return nil } - case EndElement: - return nil + depth-- } } } diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go index 6ef55de77be12335a8870f21985d49b99466ebc9..58d1eddb6123f054eeb6fead8f30e1720df72f53 100644 --- a/src/encoding/xml/read_test.go +++ b/src/encoding/xml/read_test.go @@ -5,8 +5,11 @@ package xml import ( + "bytes" + "errors" "io" "reflect" + "runtime" "strings" "testing" "time" @@ -1094,3 +1097,32 @@ t.Fatalf("no error in unmarshalling") } } + +func TestCVE202228131(t *testing.T) { + type nested struct { + Parent *nested `xml:",any"` + } + var n nested + err := Unmarshal(bytes.Repeat([]byte(""), maxUnmarshalDepth+1), &n) + if err == nil { + t.Fatal("Unmarshal did not fail") + } else if !errors.Is(err, errExeceededMaxUnmarshalDepth) { + t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth) + } +} + +func TestCVE202230633(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes memory exhaustion on js/wasm") + } + defer func() { + p := recover() + if p != nil { + t.Fatal("Unmarshal panicked") + } + }() + var example struct { + Things []string + } + Unmarshal(bytes.Repeat([]byte(""), 17_000_000), &example) +} diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go index e3468f481fcad1198eaf16490406fc9c9ed99a79..d911c8e1d0adb8d13d7774ee58c70f5b315ac41f 100644 --- a/src/go/parser/interface.go +++ b/src/go/parser/interface.go @@ -94,8 +94,11 @@ var p parser defer func() { if e := recover(); e != nil { // resume same panic if it's not a bailout - if _, ok := e.(bailout); !ok { + bail, ok := e.(bailout) + if !ok { panic(e) + } else if bail.msg != "" { + p.errors.Add(p.file.Position(bail.pos), bail.msg) } } @@ -198,8 
+201,11 @@ var p parser defer func() { if e := recover(); e != nil { // resume same panic if it's not a bailout - if _, ok := e.(bailout); !ok { + bail, ok := e.(bailout) + if !ok { panic(e) + } else if bail.msg != "" { + p.errors.Add(p.file.Position(bail.pos), bail.msg) } } p.errors.Sort() diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index ca2f24c8b8cd5b959bee3272f05196d73e8b7b89..d4ad36dc672cf58091fd33d2cdfe749a4195508e 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -59,6 +59,10 @@ exprLev int // < 0: in control clause, >= 0: in expression inRhs bool // if set, the parser is parsing a rhs expression imports []*ast.ImportSpec // list of imports + + // nestLev is used to track and limit the recursion depth + // during parsing. + nestLev int } func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) { @@ -106,6 +110,24 @@ // Usage pattern: defer un(trace(p, "...")) func un(p *parser) { p.indent-- p.printTrace(")") +} + +// maxNestLev is the deepest we're willing to recurse during parsing +const maxNestLev int = 1e5 + +func incNestLev(p *parser) *parser { + p.nestLev++ + if p.nestLev > maxNestLev { + p.error(p.pos, "exceeded max nesting depth") + panic(bailout{}) + } + return p +} + +// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion. +// It is used along with incNestLev in a similar fashion to how un and trace are used. +func decNestLev(p *parser) { + p.nestLev-- } // Advance to the next token. @@ -218,8 +240,12 @@ } } } -// A bailout panic is raised to indicate early termination. -type bailout struct{} +// A bailout panic is raised to indicate early termination. pos and msg are +// only populated when bailing out of object resolution. 
+type bailout struct { + pos token.Pos + msg string +} func (p *parser) error(pos token.Pos, msg string) { if p.trace { @@ -1247,6 +1273,8 @@ return typeparams.PackIndexExpr(typ, opening, list, closing) } func (p *parser) tryIdentOrType() ast.Expr { + defer decNestLev(incNestLev(p)) + switch p.tok { case token.IDENT: typ := p.parseTypeName(nil) @@ -1657,7 +1685,13 @@ if x == nil { x = p.parseOperand() } - for { + // We track the nesting here rather than at the entry for the function, + // since it can iteratively produce a nested output, and we want to + // limit how deep a structure we generate. + var n int + defer func() { p.nestLev -= n }() + for n = 1; ; n++ { + incNestLev(p) switch p.tok { case token.PERIOD: p.next() @@ -1717,6 +1751,8 @@ } } func (p *parser) parseUnaryExpr() ast.Expr { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "UnaryExpr")) } @@ -1806,7 +1842,13 @@ if x == nil { x = p.parseUnaryExpr() } - for { + // We track the nesting here rather than at the entry for the function, + // since it can iteratively produce a nested output, and we want to + // limit how deep a structure we generate. 
+ var n int + defer func() { p.nestLev -= n }() + for n = 1; ; n++ { + incNestLev(p) op, oprec := p.tokPrec() if oprec < prec1 { return x @@ -2099,6 +2141,8 @@ return } func (p *parser) parseIfStmt() *ast.IfStmt { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "IfStmt")) } @@ -2402,6 +2446,8 @@ } } func (p *parser) parseStmt() (s ast.Stmt) { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "Statement")) } diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index a4f882d3688195fb2c9dd7b9270828edcf0c4cd0..1a46c878663478c02673baaf07172f64577c15ef 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -10,6 +10,7 @@ "fmt" "go/ast" "go/token" "io/fs" + "runtime" "strings" "testing" ) @@ -577,3 +578,171 @@ if comment != "// comment" { t.Errorf("got %q, want %q", comment, "// comment") } } + +var parseDepthTests = []struct { + name string + format string + // multipler is used when a single statement may result in more than one + // change in the depth level, for instance "1+(..." produces a BinaryExpr + // followed by a UnaryExpr, which increments the depth twice. The test + // case comment explains which nodes are triggering the multiple depth + // changes. + parseMultiplier int + // scope is true if we should also test the statement for the resolver scope + // depth limit. + scope bool + // scopeMultiplier does the same as parseMultiplier, but for the scope + // depths. + scopeMultiplier int +}{ + // The format expands the part inside « » many times. + // A second set of brackets nested inside the first stops the repetition, + // so that for example «(«1»)» expands to (((...((((1))))...))). 
+ {name: "array", format: "package main; var x «[1]»int"}, + {name: "slice", format: "package main; var x «[]»int"}, + {name: "struct", format: "package main; var x «struct { X «int» }»", scope: true}, + {name: "pointer", format: "package main; var x «*»int"}, + {name: "func", format: "package main; var x «func()»int", scope: true}, + {name: "chan", format: "package main; var x «chan »int"}, + {name: "chan2", format: "package main; var x «<-chan »int"}, + {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType + {name: "map", format: "package main; var x «map[int]»int"}, + {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "dot", format: "package main; var x = «x.»x"}, + {name: "index", format: "package main; var x = x«[1]»"}, + {name: "slice", format: "package main; var x = x«[1:2]»"}, + {name: "slice3", format: "package main; var x = x«[1:2:3]»"}, + {name: "dottype", format: "package main; var x = x«.(any)»"}, + {name: "callseq", format: "package main; var x = x«()»"}, + {name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr + {name: "binary", format: "package main; var x = «1+»1"}, + {name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr + {name: "unary", format: "package main; var x = «^»1"}, + {name: "addr", format: "package main; var x = «& »x"}, + 
{name: "star", format: "package main; var x = «*»x"}, + {name: "recv", format: "package main; var x = «<-»x"}, + {name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2}, // Parser nodes: Ident, CallExpr + {name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr + {name: "label", format: "package main; func main() { «Label:» }"}, + {name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt + {name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true}, + {name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause + {name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause + {name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 
2, scope: true}, // Parser nodes: GoStmt, FuncLit + {name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: DeferStmt, FuncLit + {name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true}, +} + +// split splits pre«mid»post into pre, mid, post. +// If the string does not have that form, split returns x, "", "". +func split(x string) (pre, mid, post string) { + start, end := strings.Index(x, "«"), strings.LastIndex(x, "»") + if start < 0 || end < 0 { + return x, "", "" + } + return x[:start], x[start+len("«") : end], x[end+len("»"):] +} + +func TestParseDepthLimit(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes call stack exhaustion on js/wasm") + } + for _, tt := range parseDepthTests { + for _, size := range []string{"small", "big"} { + t.Run(tt.name+"/"+size, func(t *testing.T) { + n := maxNestLev + 1 + if tt.parseMultiplier > 0 { + n /= tt.parseMultiplier + } + if size == "small" { + // Decrease the number of statements by 10, in order to check + // that we do not fail when under the limit. 10 is used to + // provide some wiggle room for cases where the surrounding + // scaffolding syntax adds some noise to the depth that changes + // on a per testcase basis. + n -= 10 + } + + pre, mid, post := split(tt.format) + if strings.Contains(mid, "«") { + left, base, right := split(mid) + mid = strings.Repeat(left, n) + base + strings.Repeat(right, n) + } else { + mid = strings.Repeat(mid, n) + } + input := pre + mid + post + + fset := token.NewFileSet() + _, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution) + if size == "small" { + if err != nil { + t.Errorf("ParseFile(...): %v (want success)", err) + } + } else { + expected := "exceeded max nesting depth" + if err == nil || !strings.HasSuffix(err.Error(), expected) { + t.Errorf("ParseFile(...) 
= _, %v, want %q", err, expected) + } + } + }) + } + } +} + +func TestScopeDepthLimit(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes call stack exhaustion on js/wasm") + } + for _, tt := range parseDepthTests { + if !tt.scope { + continue + } + for _, size := range []string{"small", "big"} { + t.Run(tt.name+"/"+size, func(t *testing.T) { + n := maxScopeDepth + 1 + if tt.scopeMultiplier > 0 { + n /= tt.scopeMultiplier + } + if size == "small" { + // Decrease the number of statements by 10, in order to check + // that we do not fail when under the limit. 10 is used to + // provide some wiggle room for cases where the surrounding + // scaffolding syntax adds some noise to the depth that changes + // on a per testcase basis. + n -= 10 + } + + pre, mid, post := split(tt.format) + if strings.Contains(mid, "«") { + left, base, right := split(mid) + mid = strings.Repeat(left, n) + base + strings.Repeat(right, n) + } else { + mid = strings.Repeat(mid, n) + } + input := pre + mid + post + + fset := token.NewFileSet() + _, err := ParseFile(fset, "", input, DeclarationErrors) + if size == "small" { + if err != nil { + t.Errorf("ParseFile(...): %v (want success)", err) + } + } else { + expected := "exceeded max scope depth during object resolution" + if err == nil || !strings.HasSuffix(err.Error(), expected) { + t.Errorf("ParseFile(...) = _, %v, want %q", err, expected) + } + } + }) + } + } +} diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go index 767a5e20ad82ebfff720dd2bd299e62a53bd0a78..f8ff618eba3a8eacb153dd4c2f1873c94d32bee5 100644 --- a/src/go/parser/resolver.go +++ b/src/go/parser/resolver.go @@ -54,6 +54,8 @@ file.Scope = r.pkgScope file.Unresolved = r.unresolved[0:i] } +const maxScopeDepth int = 1e3 + type resolver struct { handle *token.File declErr func(token.Pos, string) @@ -85,16 +87,19 @@ return fmt.Sprintf(format, args...) 
} func (r *resolver) openScope(pos token.Pos) { + r.depth++ + if r.depth > maxScopeDepth { + panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"}) + } if debugResolve { r.trace("opening scope @%v", pos) - r.depth++ } r.topScope = ast.NewScope(r.topScope) } func (r *resolver) closeScope() { + r.depth-- if debugResolve { - r.depth-- r.trace("closing scope") } r.topScope = r.topScope.Outer diff --git a/src/internal/trace/goroutines.go b/src/internal/trace/goroutines.go index a5fda489bea7980610bd7e9d52e31dc5eab645df..5da90e0b6d83487c0d2bed337e80bd8855df345e 100644 --- a/src/internal/trace/goroutines.go +++ b/src/internal/trace/goroutines.go @@ -4,7 +4,10 @@ // license that can be found in the LICENSE file. package trace -import "sort" +import ( + "sort" + "strings" +) // GDesc contains statistics and execution details of a single goroutine. type GDesc struct { @@ -126,10 +129,17 @@ } finalStat := g.snapshotStat(lastTs, activeGCStartTime) g.GExecutionStat = finalStat - for _, s := range g.activeRegions { - s.End = trigger - s.GExecutionStat = finalStat.sub(s.GExecutionStat) - g.Regions = append(g.Regions, s) + + // System goroutines are never part of regions, even though they + // "inherit" a task due to creation (EvGoCreate) from within a region. + // This may happen e.g. if the first GC is triggered within a region, + // starting the GC worker goroutines. + if !IsSystemGoroutine(g.Name) { + for _, s := range g.activeRegions { + s.End = trigger + s.GExecutionStat = finalStat.sub(s.GExecutionStat) + g.Regions = append(g.Regions, s) + } } *(g.gdesc) = gdesc{} } @@ -158,10 +168,13 @@ switch ev.Type { case EvGoCreate: g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gdesc: new(gdesc)} g.blockSchedTime = ev.Ts - // When a goroutine is newly created, inherit the - // task of the active region. For ease handling of - // this case, we create a fake region description with - // the task id. 
+ // When a goroutine is newly created, inherit the task + // of the active region. For ease handling of this + // case, we create a fake region description with the + // task id. This isn't strictly necessary as this + // goroutine may not be assosciated with the task, but + // it can be convenient to see all children created + // during a region. if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gdesc.activeRegions) > 0 { regions := creatorG.gdesc.activeRegions s := regions[len(regions)-1] @@ -336,3 +349,9 @@ } gmap[0] = true // for GC events return gmap } + +func IsSystemGoroutine(entryFn string) bool { + // This mimics runtime.isSystemGoroutine as closely as + // possible. + return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.") +} diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go index 45d9cb61b9632af83bf9834ef99d47e6133b193b..0e529cd05d139e20e639a97cc5caa6cc440be033 100644 --- a/src/io/fs/glob.go +++ b/src/io/fs/glob.go @@ -31,6 +31,16 @@ // If fs implements GlobFS, Glob calls fs.Glob. // Otherwise, Glob uses ReadDir to traverse the directory tree // and look for matches for the pattern. func Glob(fsys FS, pattern string) (matches []string, err error) { + return globWithLimit(fsys, pattern, 0) +} + +func globWithLimit(fsys FS, pattern string, depth int) (matches []string, err error) { + // This limit is added to prevent stack exhaustion issues. See + // CVE-2022-30630. 
+ const pathSeparatorsLimit = 10000 + if depth > pathSeparatorsLimit { + return nil, path.ErrBadPattern + } if fsys, ok := fsys.(GlobFS); ok { return fsys.Glob(pattern) } @@ -59,9 +69,9 @@ return nil, path.ErrBadPattern } var m []string - m, err = Glob(fsys, dir) + m, err = globWithLimit(fsys, dir, depth+1) if err != nil { - return + return nil, err } for _, d := range m { matches, err = glob(fsys, d, file, matches) diff --git a/src/io/fs/glob_test.go b/src/io/fs/glob_test.go index f19bebed77f6c7c7e9fad7017b94b9c557a77bca..d052eab371366f00a17692774c87fec260b89946 100644 --- a/src/io/fs/glob_test.go +++ b/src/io/fs/glob_test.go @@ -8,6 +8,7 @@ import ( . "io/fs" "os" "path" + "strings" "testing" ) @@ -52,6 +53,15 @@ _, err := Glob(os.DirFS("."), pattern) if err != path.ErrBadPattern { t.Errorf("Glob(fs, %#q) returned err=%v, want path.ErrBadPattern", pattern, err) } + } +} + +func TestCVE202230630(t *testing.T) { + // Prior to CVE-2022-30630, a stack exhaustion would occur given a large + // number of separators. There is now a limit of 10,000. + _, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001)) + if err != path.ErrBadPattern { + t.Fatalf("Glob returned err=%v, want %v", err, path.ErrBadPattern) } } diff --git a/src/net/http/fs.go b/src/net/http/fs.go index 7a1d5f4be5f6e229c05e451ea5423883f8fe3efb..4f144ebad2530ffb94cc1aa629a9d4fa874e5536 100644 --- a/src/net/http/fs.go +++ b/src/net/http/fs.go @@ -541,6 +541,7 @@ // response does not have an ETag field). 
h := w.Header() delete(h, "Content-Type") delete(h, "Content-Length") + delete(h, "Content-Encoding") if h.Get("Etag") != "" { delete(h, "Last-Modified") } diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go index d627dfd4be965287cb1d929ba8dc5621c02525cc..4be561cdfa1fa7864240a7e5e515db23b1a36454 100644 --- a/src/net/http/fs_test.go +++ b/src/net/http/fs_test.go @@ -564,6 +564,60 @@ t.Errorf("Content-Length mismatch: got %d, want %d", g, e) } } +// Tests that ServeFile does not generate representation metadata when +// file has not been modified, as per RFC 7232 section 4.1. +func TestServeFileNotModified_h1(t *testing.T) { testServeFileNotModified(t, h1Mode) } +func TestServeFileNotModified_h2(t *testing.T) { testServeFileNotModified(t, h2Mode) } +func testServeFileNotModified(t *testing.T, h2 bool) { + defer afterTest(t) + cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Encoding", "foo") + w.Header().Set("Etag", `"123"`) + ServeFile(w, r, "testdata/file") + + // Because the testdata is so small, it would fit in + // both the h1 and h2 Server's write buffers. For h1, + // sendfile is used, though, forcing a header flush at + // the io.Copy. http2 doesn't do a header flush so + // buffers all 11 bytes and then adds its own + // Content-Length. To prevent the Server's + // Content-Length and test ServeFile only, flush here. 
+ w.(Flusher).Flush() + })) + defer cst.close() + req, err := NewRequest("GET", cst.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("If-None-Match", `"123"`) + resp, err := cst.c.Do(req) + if err != nil { + t.Fatal(err) + } + b, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + t.Fatal("reading Body:", err) + } + if len(b) != 0 { + t.Errorf("non-empty body") + } + if g, e := resp.StatusCode, StatusNotModified; g != e { + t.Errorf("status mismatch: got %d, want %d", g, e) + } + // HTTP1 transport sets ContentLength to 0. + if g, e1, e2 := resp.ContentLength, int64(-1), int64(0); g != e1 && g != e2 { + t.Errorf("Content-Length mismatch: got %d, want %d or %d", g, e1, e2) + } + if resp.Header.Get("Content-Type") != "" { + t.Errorf("Content-Type present, but it should not be") + } + if resp.Header.Get("Content-Encoding") != "" { + t.Errorf("Content-Encoding present, but it should not be") + } +} + func TestServeIndexHtml(t *testing.T) { defer afterTest(t) diff --git a/src/net/http/request.go b/src/net/http/request.go index d091f3c056ab0a4116e1ca444a4801cd09d4be6a..cead91d3d4471c168631bc9a62048584e307597a 100644 --- a/src/net/http/request.go +++ b/src/net/http/request.go @@ -1126,8 +1126,8 @@ // MaxBytesReader is similar to io.LimitReader but is intended for // limiting the size of incoming request bodies. In contrast to // io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a -// MaxBytesError for a Read beyond the limit, and closes the -// underlying reader when its Close method is called. +// non-nil error of type *MaxBytesError for a Read beyond the limit, +// and closes the underlying reader when its Close method is called. // // MaxBytesReader prevents clients from accidentally or maliciously // sending a large request and wasting server resources. 
If possible, diff --git a/src/net/http/server.go b/src/net/http/server.go index bc3a4633da8b15999f279adb2b68eadf8a261d70..87dd4129846f21c8ffce5553952703e039bed142 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -2690,6 +2690,8 @@ listeners map[*net.Listener]struct{} activeConn map[*conn]struct{} doneChan chan struct{} onShutdown []func() + + listenerGroup sync.WaitGroup } func (s *Server) getDoneChan() <-chan struct{} { @@ -2732,6 +2734,15 @@ srv.mu.Lock() defer srv.mu.Unlock() srv.closeDoneChanLocked() err := srv.closeListenersLocked() + + // Unlock srv.mu while waiting for listenerGroup. + // The group Add and Done calls are made with srv.mu held, + // to avoid adding a new listener in the window between + // us setting inShutdown above and waiting here. + srv.mu.Unlock() + srv.listenerGroup.Wait() + srv.mu.Lock() + for c := range srv.activeConn { c.rwc.Close() delete(srv.activeConn, c) @@ -2778,6 +2789,7 @@ for _, f := range srv.onShutdown { go f() } srv.mu.Unlock() + srv.listenerGroup.Wait() pollIntervalBase := time.Millisecond nextPollInterval := func() time.Duration { @@ -2794,7 +2806,7 @@ timer := time.NewTimer(nextPollInterval()) defer timer.Stop() for { - if srv.closeIdleConns() && srv.numListeners() == 0 { + if srv.closeIdleConns() { return lnerr } select { @@ -2815,12 +2827,6 @@ func (srv *Server) RegisterOnShutdown(f func()) { srv.mu.Lock() srv.onShutdown = append(srv.onShutdown, f) srv.mu.Unlock() -} - -func (s *Server) numListeners() int { - s.mu.Lock() - defer s.mu.Unlock() - return len(s.listeners) } // closeIdleConns closes all idle connections and reports whether the @@ -3157,8 +3163,10 @@ if s.shuttingDown() { return false } s.listeners[ln] = struct{}{} + s.listenerGroup.Add(1) } else { delete(s.listeners, ln) + s.listenerGroup.Done() } return true } diff --git a/src/net/url/url.go b/src/net/url/url.go index db4d6385e334792257c78f8ad080ad73898eaea4..e82ae6aeefd552d78efdee4f270ef37afc167357 100644 --- a/src/net/url/url.go +++ 
b/src/net/url/url.go @@ -1193,7 +1193,7 @@ // Any sequences of multiple / characters will be reduced to a single /. func (u *URL) JoinPath(elem ...string) *URL { url := *u if len(elem) > 0 { - elem = append([]string{u.Path}, elem...) + elem = append([]string{u.EscapedPath()}, elem...) p := path.Join(elem...) // path.Join will remove any trailing slashes. // Preserve at least one. diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 478cc34872700419ac3607a5cf53f6e2426e5aee..263eddffcf35d821bd3df4758f466bf17945faf6 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -2120,6 +2120,16 @@ elem: nil, out: "https://go.googlesource.com/", }, { + base: "https://go.googlesource.com/a%2fb", + elem: []string{"c"}, + out: "https://go.googlesource.com/a%2fb/c", + }, + { + base: "https://go.googlesource.com/a%2fb", + elem: []string{"c%2fd"}, + out: "https://go.googlesource.com/a%2fb/c%2fd", + }, + { base: "/", elem: nil, out: "/", diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go index 847a78133d3d4c157d776dd619a36e27c1b0ce23..b5cc4b8cf3fc32c09101e3d3008cd1db4ec769f8 100644 --- a/src/path/filepath/match.go +++ b/src/path/filepath/match.go @@ -240,6 +240,16 @@ // Glob ignores file system errors such as I/O errors reading directories. // The only possible returned error is ErrBadPattern, when pattern // is malformed. func Glob(pattern string) (matches []string, err error) { + return globWithLimit(pattern, 0) +} + +func globWithLimit(pattern string, depth int) (matches []string, err error) { + // This limit is used prevent stack exhaustion issues. See CVE-2022-30632. + const pathSeparatorsLimit = 10000 + if depth == pathSeparatorsLimit { + return nil, ErrBadPattern + } + // Check pattern is well-formed. 
if _, err := Match(pattern, ""); err != nil { return nil, err @@ -269,7 +279,7 @@ return nil, ErrBadPattern } var m []string - m, err = Glob(dir) + m, err = globWithLimit(dir, depth+1) if err != nil { return } diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go index 375c41a7e9d5d06615ba59451850bc8c552b45a9..d6282596fedbb93aeabc93fdc8029d43266098ae 100644 --- a/src/path/filepath/match_test.go +++ b/src/path/filepath/match_test.go @@ -155,6 +155,16 @@ } } } +func TestCVE202230632(t *testing.T) { + // Prior to CVE-2022-30632, this would cause a stack exhaustion given a + // large number of separators (more than 4,000,000). There is now a limit + // of 10,000. + _, err := Glob("/*" + strings.Repeat("/", 10001)) + if err != ErrBadPattern { + t.Fatalf("Glob returned err=%v, want ErrBadPattern", err) + } +} + func TestGlobError(t *testing.T) { bad := []string{`[]`, `nonexist/[]`} for _, pattern := range bad { diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index 7c785900db7c45b92067227d19ba65abf92565e1..1f484fb9b6df471de8a29d40d944352c2844e3aa 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -173,10 +173,6 @@ // Count the allocs in inconsistent, internal stats. bytesAllocated := slotsUsed * int64(s.elemsize) gcController.totalAlloc.Add(bytesAllocated) - // Update heapLive and flush scanAlloc. - gcController.update(bytesAllocated, int64(c.scanAlloc)) - c.scanAlloc = 0 - // Clear the second allocCount just to be safe. s.allocCountBeforeCache = 0 } @@ -198,6 +194,23 @@ // Store the current alloc count for accounting later. s.allocCountBeforeCache = s.allocCount + // Update heapLive and flush scanAlloc. + // + // We have not yet allocated anything new into the span, but we + // assume that all of its slots will get used, so this makes + // heapLive an overestimate. + // + // When the span gets uncached, we'll fix up this overestimate + // if necessary (see releaseAll). 
+ // + // We pick an overestimate here because an underestimate leads + // the pacer to believe that it's in better shape than it is, + // which appears to lead to more memory used. See #53738 for + // more details. + usedBytes := uintptr(s.allocCount) * s.elemsize + gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc)) + c.scanAlloc = 0 + c.alloc[spc] = s } @@ -247,6 +260,8 @@ // Take this opportunity to flush scanAlloc. scanAlloc := int64(c.scanAlloc) c.scanAlloc = 0 + sg := mheap_.sweepgen + dHeapLive := int64(0) for i := range c.alloc { s := c.alloc[i] if s != &emptymspan { @@ -262,6 +277,15 @@ // Adjust the actual allocs in inconsistent, internal stats. // We assumed earlier that the full span gets allocated. gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize)) + if s.sweepgen != sg+1 { + // refill conservatively counted unallocated slots in gcController.heapLive. + // Undo this. + // + // If this span was cached before sweep, then gcController.heapLive was totally + // recomputed since caching this span, so we don't do this for stale spans. + dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize) + } + // Release the span to the mcentral. mheap_.central[i].mcentral.uncacheSpan(s) c.alloc[i] = &emptymspan @@ -277,8 +301,8 @@ atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) c.tinyAllocs = 0 memstats.heapStats.release() - // Updated heapScan. - gcController.update(0, scanAlloc) + // Update heapLive and heapScan. + gcController.update(dHeapLive, scanAlloc) } // prepareForSweep flushes c if the system has entered a new sweep phase diff --git a/src/runtime/race/README b/src/runtime/race/README index eb18ad600b11cfa7ee9be831d5ca719111d76057..ad8f55fb73da6ab13f02f4b10fd084315d9c40bf 100644 --- a/src/runtime/race/README +++ b/src/runtime/race/README @@ -4,9 +4,9 @@ the LLVM project (https://github.com/llvm/llvm-project/tree/main/compiler-rt). 
To update the .syso files use golang.org/x/build/cmd/racebuild. -race_darwin_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. -race_freebsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. -race_linux_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. +race_darwin_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6. +race_freebsd_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6. +race_linux_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6. race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. race_netbsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. race_windows_amd64.syso built with LLVM 89f7ccea6f6488c443655880229c54db1f180153 and Go f62d3202bf9dbb3a00ad2a2c63ff4fa4188c5d3b. 
diff --git a/src/runtime/race/race_darwin_amd64.syso b/src/runtime/race/race_darwin_amd64.syso index dde17add91495ad4459c341c1558a3095911be9a..e5d848c883c2bfd36689489939bef4fd42ef4b46 100644 Binary files a/src/runtime/race/race_darwin_amd64.syso and b/src/runtime/race/race_darwin_amd64.syso differ diff --git a/src/runtime/race/race_freebsd_amd64.syso b/src/runtime/race/race_freebsd_amd64.syso index 8be9ff7a646a3a3c072412c63bff8d3666f4c5fd..b3a438347f1b2b1dc46d8752c0babe968cecf6a1 100644 Binary files a/src/runtime/race/race_freebsd_amd64.syso and b/src/runtime/race/race_freebsd_amd64.syso differ diff --git a/src/runtime/race/race_linux_amd64.syso b/src/runtime/race/race_linux_amd64.syso index a23064efac5c3e74232e4ed270d10f77abfc2c83..6885610f2566a51c74aece0edb1e9bad04be6301 100644 Binary files a/src/runtime/race/race_linux_amd64.syso and b/src/runtime/race/race_linux_amd64.syso differ diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 197683bc69353af74bd1b2078b32141d80961386..49147ff8381e3b45cea2f51cfc9792a3b90fcd08 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -1120,7 +1120,7 @@ // If fixed is true, any goroutine that can vary between user and // system (that is, the finalizer goroutine) is considered a user // goroutine. func isSystemGoroutine(gp *g, fixed bool) bool { - // Keep this in sync with cmd/trace/trace.go:isSystemGoroutine. + // Keep this in sync with internal/trace.IsSystemGoroutine. f := findfunc(gp.startpc) if !f.valid() { return false diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index ede8247da93edead95bacab648192b18fec711ee..554aad412d3634fec230a024fc8423c30891b8da 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -43,8 +43,8 @@ // This implies Setpgid. The Ctty field must be set to // the descriptor of the controlling TTY. // Unlike Setctty, in this case Ctty must be a descriptor // number in the parent process. 
- Foreground bool - Pgid int // Child's process group ID if Setpgid. + Foreground bool + Pgid int // Child's process group ID if Setpgid. // Pdeathsig, if non-zero, is a signal that the kernel will send to // the child process when the creating thread dies. Note that the signal // is sent on thread termination, which may happen before process termination. diff --git a/test/fixedbugs/issue53635.go b/test/fixedbugs/issue53635.go new file mode 100644 index 0000000000000000000000000000000000000000..bea54938055d1e236a2ffeac83427c3a5b04667c --- /dev/null +++ b/test/fixedbugs/issue53635.go @@ -0,0 +1,31 @@ +// run + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + f[int]() +} + +func f[T any]() { + switch []T(nil) { + case nil: + default: + panic("FAIL") + } + + switch (func() T)(nil) { + case nil: + default: + panic("FAIL") + } + + switch (map[int]T)(nil) { + case nil: + default: + panic("FAIL") + } +} diff --git a/test/run.go b/test/run.go index 8934e23b3857a1a64a2f5e9bd932626a459438be..cb1622ccc9763d3504ce09efce109ea72662e2c4 100644 --- a/test/run.go +++ b/test/run.go @@ -1966,7 +1966,6 @@ var go118Failures = setOf( "typeparam/nested.go", // 1.18 compiler doesn't support function-local types with generics "typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error - "typeparam/issue53419.go", // 1.18 compiler mishandles generic selector resolution ) // In all of these cases, the 1.17 compiler reports reasonable errors, but either the