doc/godebug.md | 6 +++--- lib/wasm/go_wasip1_wasm_exec | 2 +- src/cmd/cgo/internal/testsanitizers/asan_test.go | 2 ++ src/cmd/cgo/internal/testsanitizers/cc_test.go | 2 +- src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s | 8 ++++++++ src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go | 11 +++++++++++ src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s | 8 ++++++++ src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go | 20 ++++++++++++++++++++ src/cmd/compile/internal/base/debug.go | 1 + src/cmd/compile/internal/base/flag.go | 4 ++++ src/cmd/compile/internal/base/hashdebug.go | 1 + src/cmd/compile/internal/escape/escape.go | 40 +++++++++++++++++++++++++++++----------- src/cmd/compile/internal/ssa/_gen/README | 5 +++++ src/cmd/compile/internal/ssa/_gen/allocators.go | 2 +- src/cmd/compile/internal/ssa/_gen/main.go | 14 +++++++++++++- src/cmd/compile/internal/ssa/_gen/rulegen.go | 2 +- src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE | 27 +++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS | 22 ++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go | 654 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go | 490 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go | 486 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go | 11 +++++++++++ src/cmd/compile/internal/ssa/_gen/vendor/modules.txt | 3 +++ src/cmd/compile/internal/ssa/generate_test.go | 135 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/cmd/compile/internal/types2/api_test.go | 5 +++++ src/cmd/compile/internal/types2/interface.go | 18 
++++++++---------- src/cmd/compile/internal/walk/order.go | 8 ++++++-- src/cmd/dist/test.go | 27 +++++++++++++++++++++++---- src/cmd/go/alldocs.go | 44 ++++++++++++++++++++++++-------------------- src/cmd/go/internal/bug/bug.go | 2 +- src/cmd/go/internal/cache/default.go | 2 +- src/cmd/go/internal/doc/doc.go | 4 ++++ src/cmd/go/internal/fips140/fips140.go | 6 +++++- src/cmd/internal/doc/main.go | 12 ++++++++++-- src/cmd/link/internal/loader/loader.go | 59 +++++++++++++++++++++++++++++++++++++++++++++-------- src/cmd/trace/gen.go | 6 +++--- src/context/context_test.go | 2 +- src/crypto/cipher/gcm.go | 2 +- src/crypto/ecdsa/ecdsa.go | 22 +++++++++++----------- src/crypto/hmac/hmac_test.go | 13 +++++++++++++ src/crypto/internal/fips140/hmac/hmac.go | 20 +++++++++++++++----- src/crypto/internal/fips140/subtle/xor_arm.s | 149 ----------------------------------------------------- src/crypto/internal/fips140/subtle/xor_asm.go | 2 +- src/crypto/internal/fips140/subtle/xor_generic.go | 2 +- src/crypto/tls/auth.go | 59 +++++++++++++++++++++++------------------------------ src/crypto/tls/bogo_config.json | 16 ++++++++++++---- src/crypto/tls/bogo_shim_test.go | 33 ++++++++++++++++++++++++++++++++- src/crypto/tls/handshake_client_tls13.go | 3 ++- src/crypto/tls/handshake_server_tls13.go | 3 ++- src/encoding/json/bench_test.go | 9 +++------ src/encoding/json/decode.go | 13 ++++++++----- src/encoding/json/decode_test.go | 21 +++++++++++++++++++++ src/encoding/json/encode.go | 38 +++++++++++++++++++++++++++++++++++--- src/encoding/json/internal/jsonopts/options.go | 8 ++++---- src/encoding/json/jsontext/doc.go | 9 +++++++-- src/encoding/json/jsontext/encode.go | 26 +++++++++++++------------- src/encoding/json/jsontext/state.go | 4 ++-- src/encoding/json/jsontext/token.go | 8 ++++---- src/encoding/json/v2/arshal.go | 42 ++++++++++++++++++++++++++---------------- src/encoding/json/v2/arshal_inlined.go | 2 +- src/encoding/json/v2/arshal_test.go | 90 
+++++++++++++++++++++++++++++++++++++---------------- src/encoding/json/v2/arshal_time.go | 191 +++++++++++++++++++++++++++++++++++++++++++++++++++-- src/encoding/json/v2/arshal_time_test.go | 302 ++++++++++++++++++++++++++++++++++------------------- src/encoding/json/v2/bench_test.go | 3 ++- src/encoding/json/v2/doc.go | 5 +++++ src/encoding/json/v2/example_test.go | 45 ++++++++++++++++++++++++--------------------- src/encoding/json/v2/fields.go | 8 ++++++++ src/encoding/json/v2/fields_test.go | 13 +++++++++++++ src/encoding/json/v2/options.go | 5 +++-- src/encoding/json/v2_decode.go | 2 +- src/encoding/json/v2_decode_test.go | 21 +++++++++++++++++++++ src/encoding/json/v2_diff_test.go | 1 + src/encoding/json/v2_encode.go | 7 +++++-- src/encoding/json/v2_options.go | 2 +- src/go/doc/testdata/issue62640.0.golden | 22 ++++++++++++++++++++++ src/go/doc/testdata/issue62640.1.golden | 22 ++++++++++++++++++++++ src/go/doc/testdata/issue62640.2.golden | 25 +++++++++++++++++++++++++ src/go/doc/testdata/issue62640.go | 15 +++++++++++++++ src/go/types/api_test.go | 5 +++++ src/go/types/interface.go | 18 ++++++++---------- src/hash/hash.go | 7 ++++--- src/internal/abi/iface.go | 2 +- src/internal/buildcfg/exp.go | 4 ++-- src/internal/goexperiment/flags.go | 20 ++++++++++---------- src/internal/reflectlite/value.go | 16 +++++++++------- src/internal/synctest/synctest_test.go | 33 +++++++++++++++++++++++++-------- src/internal/trace/event.go | 5 +++++ src/internal/trace/gc.go | 2 +- src/internal/trace/testdata/testprog/gc-stress.go | 25 ++++++++++++++++++++++--- src/internal/trace/testdata/testprog/stacks.go | 14 ++++++++++++++ src/internal/trace/testtrace/validation.go | 2 +- src/internal/trace/trace_test.go | 42 ++++++++++++++++++++++++++++++++---------- src/internal/trace/value.go | 34 +++++++++++++--------------------- src/iter/iter.go | 2 +- src/net/http/csrf.go | 11 ++++++++--- src/net/http/roundtrip_js.go | 8 ++++++++ src/net/iprawsock.go | 3 --- src/net/tcpsock.go | 2 
+- src/net/udpsock.go | 3 --- src/os/exec/exec.go | 2 +- src/os/removeall_at.go | 11 ++++++++++- src/reflect/all_test.go | 5 +++++ src/reflect/value.go | 68 +++++++++++++++++++++++++++--------------------------- src/runtime/arena.go | 10 +++++++++- src/runtime/debug.go | 8 +++++++- src/runtime/mcache.go | 8 ++++++++ src/runtime/mcentral.go | 3 --- src/runtime/mem_sbrk.go | 1 + src/runtime/mfinal.go | 5 ----- src/runtime/mgc.go | 7 ++++--- src/runtime/mgcmark.go | 4 ++-- src/runtime/mgcmark_greenteagc.go | 20 ++++++++++++++++++++ src/runtime/mheap.go | 11 ++++++++--- src/runtime/proc.go | 115 +++++++++++++++++++++++++++++++++++++++-------------- src/runtime/runtime-gdb_test.go | 3 ++- src/runtime/runtime2.go | 16 +++++++++------- src/runtime/stack.go | 7 +++++-- src/runtime/testdata/testprog/gomaxprocs.go | 14 ++++++++++++++ src/runtime/testdata/testprogcgo/needmdeadlock.go | 10 ++-------- src/runtime/trace.go | 10 ++++------ src/runtime/traceruntime.go | 2 +- src/runtime/tracestack.go | 17 ++++++++++++++++- src/runtime/tracestatus.go | 9 +++++---- src/runtime/tracetime.go | 2 +- src/slices/slices_test.go | 3 +++ src/sync/waitgroup.go | 21 +++++++++++++-------- src/testing/synctest/helper_test.go | 15 +++++++++++++++ src/testing/synctest/synctest_test.go | 12 ++++++++++++ src/testing/testing.go | 3 +++ test/fixedbugs/issue74379.go | 30 ++++++++++++++++++++++++++++++ test/fixedbugs/issue74379b.go | 32 ++++++++++++++++++++++++++++++++ test/fixedbugs/issue74379c.go | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/doc/godebug.md b/doc/godebug.md index 15be9da5df0acb94a0d63250988fab757943fa4c..d107b1baf15d7988bf073cc97958e7b8da399f8e 100644 --- a/doc/godebug.md +++ b/doc/godebug.md @@ -187,7 +187,7 @@ Go 1.25 switched to SHA-256 to fill in missing SubjectKeyId in crypto/x509.CreateCertificate. The setting `x509sha256skid=0` reverts to SHA-1. 
Go 1.25 corrected the semantics of contention reports for runtime-internal locks, -and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variable). +and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variables). ### Go 1.24 @@ -369,7 +369,7 @@ In particular, a common default Linux kernel configuration can result in significant memory overheads, and Go 1.22 no longer works around this default. To work around this issue without adjusting kernel settings, transparent huge pages can be disabled for Go memory with the -[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable). +[`disablethp` setting](/pkg/runtime#hdr-Environment_Variables). This behavior was backported to Go 1.21.1, but the setting is only available starting with Go 1.21.6. This setting may be removed in a future release, and users impacted by this issue @@ -381,7 +381,7 @@ Go 1.22 added contention on runtime-internal locks to the [`mutex` profile](/pkg/runtime/pprof#Profile). Contention on these locks is always reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of runtime locks can be enabled with the [`runtimecontentionstacks` -setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have +setting](/pkg/runtime#hdr-Environment_Variables). These stack traces have non-standard semantics, see setting documentation for details. 
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate) diff --git a/lib/wasm/go_wasip1_wasm_exec b/lib/wasm/go_wasip1_wasm_exec index 3b2d12ec458c5484605286295690acb944ba00e2..2de1758793f21237d9f5bdedd927bd2c39e081e2 100755 --- a/lib/wasm/go_wasip1_wasm_exec +++ b/lib/wasm/go_wasip1_wasm_exec @@ -14,7 +14,7 @@ "wazero") exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}" ;; "wasmtime" | "") - exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}" + exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=8388608 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}" ;; *) echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME" diff --git a/src/cmd/cgo/internal/testsanitizers/asan_test.go b/src/cmd/cgo/internal/testsanitizers/asan_test.go index c2cdf7b6d645e881ec627df55ebcb10496bb5681..dd01bd0dc0bde904363b0591ccfe37ead7cd2a12 100644 --- a/src/cmd/cgo/internal/testsanitizers/asan_test.go +++ b/src/cmd/cgo/internal/testsanitizers/asan_test.go @@ -42,6 +42,8 @@ {src: "asan_global2_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global2_fail.go:19"}, {src: "asan_global3_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global3_fail.go:13"}, {src: "asan_global4_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global4_fail.go:21"}, {src: "asan_global5.go"}, + {src: "asan_global_asm"}, + {src: "asan_global_asm2_fail", memoryAccessError: "global-buffer-overflow", errorLocation: "main.go:17"}, {src: "arena_fail.go", memoryAccessError: "use-after-poison", errorLocation: "arena_fail.go:26", experiments: []string{"arenas"}}, } for _, tc := range cases { diff --git a/src/cmd/cgo/internal/testsanitizers/cc_test.go b/src/cmd/cgo/internal/testsanitizers/cc_test.go index 
8052f34f368cedf3135ada37a87c9f88759bc871..f2239fb9cffc2e02ac030e96d7fb8a6bcc1c6c51 100644 --- a/src/cmd/cgo/internal/testsanitizers/cc_test.go +++ b/src/cmd/cgo/internal/testsanitizers/cc_test.go @@ -554,7 +554,7 @@ } // srcPath returns the path to the given file relative to this test's source tree. func srcPath(path string) string { - return filepath.Join("testdata", path) + return "./testdata/" + path } // A tempDir manages a temporary directory within a test. diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s new file mode 100644 index 0000000000000000000000000000000000000000..b4b9766f57ac60ed94c9b5d5fda0c552fb795eaf --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s @@ -0,0 +1,8 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +DATA ·x(SB)/8, $123 +GLOBL ·x(SB), NOPTR, $8 diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go new file mode 100644 index 0000000000000000000000000000000000000000..2ae54486f34c2b6126305cb154a6ae47a48304e2 --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go @@ -0,0 +1,11 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +var x uint64 + +func main() { + println(x) +} diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s new file mode 100644 index 0000000000000000000000000000000000000000..b4b9766f57ac60ed94c9b5d5fda0c552fb795eaf --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s @@ -0,0 +1,8 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +DATA ·x(SB)/8, $123 +GLOBL ·x(SB), NOPTR, $8 diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go new file mode 100644 index 0000000000000000000000000000000000000000..2d02a1b542617977aacd096d461ea4464aab1bae --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go @@ -0,0 +1,20 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "unsafe" + +var x uint64 + +func main() { + bar(&x) +} + +func bar(a *uint64) { + p := (*uint64)(unsafe.Add(unsafe.Pointer(a), 1*unsafe.Sizeof(uint64(1)))) + if *p == 10 { // BOOM + println("its value is 10") + } +} diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 10393e773c366dceda48afada81600b3aac440c6..85873dcc40e1b31279c9c8b20cb4df3efcb75130 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -40,6 +40,7 @@ Gossahash string `help:"hash value for use in debugging the compiler"` InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"` InlStaticInit int `help:"allow static initialization of inlined calls" concurrent:"ok"` Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` + LiteralAllocHash string `help:"hash value for use in debugging literal allocation optimizations" concurrent:"ok"` LoopVar int `help:"shared (0, default), 1 (private loop variables), 2, private + log"` LoopVarHash string `help:"for debugging changes in loop behavior. 
Overrides experiment and loopvar flag."` LocationLists int `help:"print information about DWARF location list creation"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index abf85c7e7869dbaf31b671d9eda3a9b476a1ebed..e87f57cdaae0206d55f1d238a8cafe57a859fe6f 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -268,6 +268,10 @@ } if Debug.PGOHash != "" { PGOHash = NewHashDebug("pgohash", Debug.PGOHash, nil) } + if Debug.LiteralAllocHash != "" { + LiteralAllocHash = NewHashDebug("literalalloc", Debug.LiteralAllocHash, nil) + } + if Debug.MergeLocalsHash != "" { MergeLocalsHash = NewHashDebug("mergelocals", Debug.MergeLocalsHash, nil) } diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go index c54b6e17aae2f0aa0d572c433578afbedac81a01..fa63deb46a3c019bde47b683a4d7b6e3ef882db6 100644 --- a/src/cmd/compile/internal/base/hashdebug.go +++ b/src/cmd/compile/internal/base/hashdebug.go @@ -56,6 +56,7 @@ var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes var LoopVarHash *HashDebug // for debugging shared/private loop variable changes var PGOHash *HashDebug // for debugging PGO optimization decisions +var LiteralAllocHash *HashDebug // for debugging literal allocation optimizations var MergeLocalsHash *HashDebug // for debugging local stack slot merging changes var VariableMakeHash *HashDebug // for debugging variable-sized make optimizations diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 06dee7ec41bf9204d9acef9aa0461b6d8e991b2e..72d40bd258d8f13155019d36cf3fbedb123922b9 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -534,10 +534,6 @@ } if n.Op() != ir.OMAKESLICE && n.Op() != ir.OCONVIFACE { return } - if base.Flag.Cfg.CoverageInfo != nil { - // Avoid altering coverage results. 
- return - } // Look up a cached ReassignOracle for the function, lazily computing one if needed. ro := b.reassignOracle(fn) @@ -545,6 +541,14 @@ if ro == nil { base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent) } + assignTemp := func(n ir.Node, init *ir.Nodes) { + // Preserve any side effects of n by assigning it to an otherwise unused temp. + pos := n.Pos() + tmp := typecheck.TempAt(pos, fn, n.Type()) + init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp))) + init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, tmp, n))) + } + switch n.Op() { case ir.OMAKESLICE: // Check if we can replace a non-constant argument to make with @@ -556,13 +560,21 @@ if n.Cap == nil { r = &n.Len } - if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL { - lit, ok := s.(*ir.BasicLit) - if !ok || lit.Val().Kind() != constant.Int { - base.Fatalf("unexpected BasicLit Kind") - } - if constant.Compare(lit.Val(), token.GEQ, constant.MakeInt64(0)) { - *r = lit + if (*r).Op() != ir.OLITERAL { + if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL { + lit, ok := s.(*ir.BasicLit) + if !ok || lit.Val().Kind() != constant.Int { + base.Fatalf("unexpected BasicLit Kind") + } + if constant.Compare(lit.Val(), token.GEQ, constant.MakeInt64(0)) { + if !base.LiteralAllocHash.MatchPos(n.Pos(), nil) { + // De-selected by literal alloc optimizations debug hash. + return + } + // Preserve any side effects of the original expression, then replace it. + assignTemp(*r, n.PtrInit()) + *r = lit + } } } case ir.OCONVIFACE: @@ -572,9 +584,15 @@ conv := n.(*ir.ConvExpr) if conv.X.Op() != ir.OLITERAL && !conv.X.Type().IsInterface() { v := ro.StaticValue(conv.X) if v != nil && v.Op() == ir.OLITERAL && ir.ValidTypeForConst(conv.X.Type(), v.Val()) { + if !base.LiteralAllocHash.MatchPos(n.Pos(), nil) { + // De-selected by literal alloc optimizations debug hash. 
+ return + } if base.Debug.EscapeDebug >= 3 { base.WarnfAt(n.Pos(), "rewriting OCONVIFACE value from %v (%v) to %v (%v)", conv.X, conv.X.Type(), v, v.Type()) } + // Preserve any side effects of the original expression, then replace it. + assignTemp(conv.X, conv.PtrInit()) v := v.(*ir.BasicLit) conv.X = ir.NewBasicLit(conv.X.Pos(), conv.X.Type(), v.Val()) typecheck.Expr(conv) diff --git a/src/cmd/compile/internal/ssa/_gen/README b/src/cmd/compile/internal/ssa/_gen/README index 74b81c2814330c354b84a3f35a3513e63f101b29..a8242f9352709719532987140688f5c6206b4544 100644 --- a/src/cmd/compile/internal/ssa/_gen/README +++ b/src/cmd/compile/internal/ssa/_gen/README @@ -9,3 +9,8 @@ more information. To regenerate everything, run "go generate" on the ssa package in the parent directory. + +The parent directory contains a test in generate_test.go that will fail +if the generated files are not up-to-date, and to allow that test to +run in no-network environments, golang.org/x/tools/go/ast/astutil is +vendored. 
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go index 682fc5f20217e94350e907139a1584314b3963e3..38acc5133abe86a8fca3f57d9639db997db56ae0 100644 --- a/src/cmd/compile/internal/ssa/_gen/allocators.go +++ b/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -155,7 +155,7 @@ fmt.Printf("%s\n", w.Bytes()) panic(err) } - if err := os.WriteFile("../allocators.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("allocators.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } } diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go index 3f65831b6e02b5ec429c5d87ca215b0ca2561343..f20603245ba3216d1de67b3aee59e1a5f527af62 100644 --- a/src/cmd/compile/internal/ssa/_gen/main.go +++ b/src/cmd/compile/internal/ssa/_gen/main.go @@ -113,6 +113,7 @@ var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") var memprofile = flag.String("memprofile", "", "write memory profile to `file`") var tracefile = flag.String("trace", "", "write trace to `file`") +var outDir = flag.String("outdir", "..", "directory in which to write generated files") func main() { flag.Parse() @@ -142,6 +143,13 @@ if err := trace.Start(f); err != nil { log.Fatalf("failed to start trace: %v", err) } defer trace.Stop() + } + + if *outDir != ".." 
{ + err := os.MkdirAll(*outDir, 0755) + if err != nil { + log.Fatalf("failed to create output directory: %v", err) + } } slices.SortFunc(archs, func(a, b arch) int { @@ -191,6 +199,10 @@ if err := pprof.WriteHeapProfile(f); err != nil { log.Fatal("could not write memory profile: ", err) } } +} + +func outFile(file string) string { + return *outDir + "/" + file } func genOp() { @@ -500,7 +512,7 @@ fmt.Printf("%s\n", w.Bytes()) panic(err) } - if err := os.WriteFile("../opGen.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("opGen.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index c2891da6c8d368b2296815cd09606047577f85af..3854d58b7f9e56cf700434a4dda97bdd557e7a48 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -322,7 +322,7 @@ } file = astutil.Apply(file, pre, post).(*ast.File) // Write the well-formatted source to file - f, err := os.Create("../rewrite" + arch.name + suff + ".go") + f, err := os.Create(outFile("rewrite" + arch.name + suff + ".go")) if err != nil { log.Fatalf("can't write output: %v", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2a7cf70da6e498df9c11ab6a5eaa2ddd7af34da4 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000000000000000000000000000000000..733099041f84fa1e58611ab2e11af51c1f26d1d2 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 0000000000000000000000000000000000000000..6e34df46130b7543f2c878fb02434ad68d34bba1 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,654 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. 
+ +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. 
+func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. 
+ if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) 
ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case 
*ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? 
+ + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
+func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function 
type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go 
b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 0000000000000000000000000000000000000000..a6b5ed0a8933eb4a40712311cb6622f08ac21dd6 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,490 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. 
+ var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. 
+ // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. 
+ var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. 
+ for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. 
+ if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). +func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. 
+ return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. 
+func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 0000000000000000000000000000000000000000..58934f76633d50db0835e36acd732fb177165706 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. 
+// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. 
+func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + 
case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case 
*ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. 
+ + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. +type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 0000000000000000000000000000000000000000..ca71e3e1055387c89bf1d60130b420e93af04a41 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// Deprecated: use [ast.Unparen]. 
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt new file mode 100644 index 0000000000000000000000000000000000000000..2efa97223356a23bc0a020df9b017e456273915d --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt @@ -0,0 +1,3 @@ +# golang.org/x/tools v0.27.0 +## explicit; go 1.22.0 +golang.org/x/tools/go/ast/astutil diff --git a/src/cmd/compile/internal/ssa/generate_test.go b/src/cmd/compile/internal/ssa/generate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d65288c399996fc702ece9c97789fec04a700ecf --- /dev/null +++ b/src/cmd/compile/internal/ssa/generate_test.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "bytes" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const expectedHeader = "// Code generated from _gen/" // this is the common part + +// TestGeneratedFilesUpToDate regenerates all the rewrite and rewrite-related +// files defined in _gen into a temporary directory, +// checks that they match what appears in the source tree, +// verifies that they start with the prefix of a generated header, +// and checks that the only source files with that header were actually generated. +func TestGeneratedFilesUpToDate(t *testing.T) { + testenv.MustHaveGoRun(t) + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + genDir := filepath.Join(wd, "_gen") + if _, err := os.Stat(genDir); os.IsNotExist(err) { + t.Fatalf("_gen directory not found") + } + + tmpdir := t.TempDir() + + // Accumulate a list of all existing files that look generated. + // It's an error if this set does not match the set that are + // generated into tmpdir. 
+ genFiles := make(map[string]bool) + genPrefix := []byte(expectedHeader) + ssaFiles, err := filepath.Glob(filepath.Join(wd, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in ssa directory: %v", err) + } + for _, f := range ssaFiles { + contents, err := os.ReadFile(f) + if err != nil { + t.Fatalf("could not read source file from ssa directory: %v", err) + } + // verify that the generated file has the expected header + // (this should cause other failures later, but if this is + // the problem, diagnose it here to shorten the treasure hunt.) + if bytes.HasPrefix(contents, genPrefix) { + genFiles[filepath.Base(f)] = true + } + } + + goFiles, err := filepath.Glob(filepath.Join(genDir, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in _gen: %v", err) + } + if len(goFiles) == 0 { + t.Fatal("no .go files found in _gen") + } + + // Construct the command line for "go run". + // Explicitly list the files, just to make it + // clear what is included (if the test is logging). + args := []string{"run", "-C", genDir} + for _, f := range goFiles { + args = append(args, filepath.Base(f)) + } + args = append(args, "-outdir", tmpdir) + + logArgs := fmt.Sprintf("%v", args) + logArgs = logArgs[1 : len(logArgs)-2] // strip '[' and ']' + t.Logf("%s %v", testenv.GoToolPath(t), logArgs) + output, err := testenv.Command(t, testenv.GoToolPath(t), args...).CombinedOutput() + + if err != nil { + t.Fatalf("go run in _gen failed: %v\n%s", err, output) + } + + // Compare generated files with existing files in the parent directory. 
+ files, err := os.ReadDir(tmpdir) + if err != nil { + t.Fatalf("could not read tmpdir %s: %v", tmpdir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + filename := file.Name() + + // filename must be in the generated set, + if !genFiles[filename] { + t.Errorf("%s does not start with the expected header '%s' (if the header was changed the test needs to be updated)", + filename, expectedHeader) + } + genFiles[filename] = false // remove from set + + generatedPath := filepath.Join(tmpdir, filename) + originalPath := filepath.Join(wd, filename) + + generatedData, err := os.ReadFile(generatedPath) + if err != nil { + t.Errorf("could not read generated file %s: %v", generatedPath, err) + continue + } + + // there should be a corresponding file in the ssa directory, + originalData, err := os.ReadFile(originalPath) + if err != nil { + if os.IsNotExist(err) { + t.Errorf("generated file %s was created, but does not exist in the ssa directory. It may need to be added to the repository.", filename) + } else { + t.Errorf("could not read original file %s: %v", originalPath, err) + } + continue + } + + // and the contents of that file should match. + if !bytes.Equal(originalData, generatedData) { + t.Errorf("%s is out of date. Please run 'go generate'.", filename) + } + } + + // the generated set should be empty now. + for file, notGenerated := range genFiles { + if notGenerated { + t.Errorf("%s has the header of a generated file but was not generated", file) + } + } +} diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 44fb6afe9858ab5e2bcfe2f0118bce4b6b764309..0d3c8b8e3e5e0f692cad03dc144a4664c7e897e3 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -358,6 +358,11 @@ // go.dev/issue/47895 {`package p; import "unsafe"; type S struct { f int }; var s S; var _ = unsafe.Offsetof(s.f)`, `s.f`, `int`}, + // go.dev/issue/74303. 
Note that interface field types are synthetic, so + // even though `func()` doesn't appear in the source, it appears in the + // syntax tree. + {`package p; type T interface { M(int) }`, `func(int)`, `func(int)`}, + // go.dev/issue/50093 {`package u0a; func _[_ interface{int}]() {}`, `int`, `int`}, {`package u1a; func _[_ interface{~int}]() {}`, `~int`, `~int`}, diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go index b32e5c21fe250c20d02fb58c468adb0df5b39475..522f1dd3fe382fe2095807ad5089c08f78761b4a 100644 --- a/src/cmd/compile/internal/types2/interface.go +++ b/src/cmd/compile/internal/types2/interface.go @@ -137,19 +137,17 @@ // We have a method with name f.Name. name := f.Name.Value if name == "_" { check.error(f.Name, BlankIfaceMethod, "methods must have a unique non-blank name") - continue // ignore method + continue // ignore } - // Type-check method declaration. - // Note: Don't call check.typ(f.Type) as that would record - // the method incorrectly as a type expression in Info.Types. 
- ftyp, _ := f.Type.(*syntax.FuncType) - if ftyp == nil { - check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", f.Type) - continue // ignore method + typ := check.typ(f.Type) + sig, _ := typ.(*Signature) + if sig == nil { + if isValid(typ) { + check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ) + } + continue // ignore } - sig := new(Signature) - check.funcType(sig, nil, nil, ftyp) // use named receiver type if available (for better error messages) var recvTyp Type = ityp diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 8ba8dd96cc41c6241a4bec36e5a931d00cf0babc..cb022faddf29e3d1f7dc32e479fba7e0654dfa27 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -246,14 +246,18 @@ v := staticValue(n) if v == nil { v = n } + optEnabled := func(n ir.Node) bool { + // Do this optimization only when enabled for this node. + return base.LiteralAllocHash.MatchPos(n.Pos(), nil) + } if (v.Op() == ir.OSTRUCTLIT || v.Op() == ir.OARRAYLIT) && !base.Ctxt.IsFIPS() { - if ir.IsZero(v) && 0 < v.Type().Size() && v.Type().Size() <= abi.ZeroValSize { + if ir.IsZero(v) && 0 < v.Type().Size() && v.Type().Size() <= abi.ZeroValSize && optEnabled(n) { // This zero value can be represented by the read-only zeroVal. zeroVal := ir.NewLinksymExpr(v.Pos(), ir.Syms.ZeroVal, n.Type()) vstat := typecheck.Expr(zeroVal).(*ir.LinksymOffsetExpr) return vstat } - if isStaticCompositeLiteral(v) { + if isStaticCompositeLiteral(v) && optEnabled(n) { // v can be directly represented in the read-only data section. 
lit := v.(*ir.CompLitExpr) vstat := readonlystaticname(n.Type()) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 637433d45135a239eb8d9ff8b5f2152addec6075..aa09d1eba34be883ab053597788b27f958c7d534 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -336,6 +336,10 @@ variant string // omitVariant indicates that variant is used solely for the dist test name and // that the set of test names run by each variant (including empty) of a package // is non-overlapping. + // + // TODO(mknyszek): Consider removing omitVariant as it is no longer set to true + // by any test. It's too valuable to have timing information in ResultDB that + // corresponds directly with dist names for tests. omitVariant bool // We have both pkg and pkgs as a convenience. Both may be set, in which @@ -595,8 +599,11 @@ timelog("start", dt.name) defer timelog("end", dt.name) ranGoBench = true return (&goTest{ - variant: "racebench", - omitVariant: true, // The only execution of benchmarks in dist; benchmark names are guaranteed not to overlap with test names. + variant: "racebench", + // Include the variant even though there's no overlap in test names. + // This makes the test targets distinct, allowing our build system to record + // elapsed time for each one, which is useful for load-balancing test shards. + omitVariant: false, timeout: 1200 * time.Second, // longer timeout for race with benchmarks race: true, bench: true, @@ -736,6 +743,15 @@ }) } } + // Test GOEXPERIMENT=jsonv2. + if !strings.Contains(goexperiment, "jsonv2") { + t.registerTest("GOEXPERIMENT=jsonv2 go test encoding/json/...", &goTest{ + variant: "jsonv2", + env: []string{"GOEXPERIMENT=jsonv2"}, + pkg: "encoding/json/...", + }) + } + // Test ios/amd64 for the iOS simulator. 
if goos == "darwin" && goarch == "amd64" && t.cgoEnabled { t.registerTest("GOOS=ios on darwin/amd64", @@ -983,8 +999,11 @@ for shard := 0; shard < nShards; shard++ { id := fmt.Sprintf("%d_%d", shard, nShards) t.registerTest("../test", &goTest{ - variant: id, - omitVariant: true, // Shards of the same Go package; tests are guaranteed not to overlap. + variant: id, + // Include the variant even though there's no overlap in test names. + // This makes the test target more clearly distinct in our build + // results and is important for load-balancing test shards. + omitVariant: false, pkg: "cmd/internal/testdir", testFlags: []string{fmt.Sprintf("-shard=%d", shard), fmt.Sprintf("-shards=%d", nShards)}, runOnHost: true, diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index e8034bf5d17a2e5e5e25003ef64af84564514832..7403b92cd14a3c46771350cd49617b7509cb5213 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -405,6 +405,8 @@ // Examples: // // go doc // Show documentation for current package. +// go doc -http +// Serve HTML documentation over HTTP for the current package. // go doc Foo // Show documentation for Foo in the current package. // (Foo starts with a capital letter so it cannot match @@ -439,26 +441,28 @@ // cd go/src/encoding/json; go doc decode // // Flags: // -// -all -// Show all the documentation for the package. -// -c -// Respect case when matching symbols. -// -cmd -// Treat a command (package main) like a regular package. -// Otherwise package main's exported symbols are hidden -// when showing the package's top-level documentation. -// -short -// One-line representation for each symbol. -// -src -// Show the full source code for the symbol. This will -// display the full Go source of its declaration and -// definition, such as a function definition (including -// the body), type declaration or enclosing const -// block. The output may therefore include unexported -// details. 
-// -u -// Show documentation for unexported as well as exported -// symbols, methods, and fields. +// -all +// Show all the documentation for the package. +// -c +// Respect case when matching symbols. +// -cmd +// Treat a command (package main) like a regular package. +// Otherwise package main's exported symbols are hidden +// when showing the package's top-level documentation. +// -http +// Serve HTML docs over HTTP. +// -short +// One-line representation for each symbol. +// -src +// Show the full source code for the symbol. This will +// display the full Go source of its declaration and +// definition, such as a function definition (including +// the body), type declaration or enclosing const +// block. The output may therefore include unexported +// details. +// -u +// Show documentation for unexported as well as exported +// symbols, methods, and fields. // // # Print Go environment information // diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go index d3f9065d3da4e5ce15574b14b48ac6a5d17f076f..4ff45d2d888c9671e366ef9781a57543d8aae96f 100644 --- a/src/cmd/go/internal/bug/bug.go +++ b/src/cmd/go/internal/bug/bug.go @@ -69,7 +69,7 @@ diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go index b2dd69edc539a2552670d771dff068e6a5592a36..cc4e0517b4a12d09c858b66fa5caa92a32d4b0be 100644 --- a/src/cmd/go/internal/cache/default.go +++ b/src/cmd/go/internal/cache/default.go @@ -28,7 +28,7 @@ // README as a courtesy to explain where it came from. const cacheREADME = `This directory holds cached build artifacts from the Go build system. Run "go clean -cache" if the directory is getting too large. Run "go clean -fuzzcache" to delete the fuzz cache. -See golang.org to learn more about Go. +See go.dev to learn more about Go. 
` // initDefaultCache does the work of finding the default cache diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go index 131da814951d0f7ac7879c3b137b4d8b7c810c54..74c70e2c7a50164cfa2a377dfe4529ccab8ccf9e 100644 --- a/src/cmd/go/internal/doc/doc.go +++ b/src/cmd/go/internal/doc/doc.go @@ -75,6 +75,8 @@ Examples: go doc Show documentation for current package. + go doc -http + Serve HTML documentation over HTTP for the current package. go doc Foo Show documentation for Foo in the current package. (Foo starts with a capital letter so it cannot match @@ -116,6 +118,8 @@ -cmd Treat a command (package main) like a regular package. Otherwise package main's exported symbols are hidden when showing the package's top-level documentation. + -http + Serve HTML docs over HTTP. -short One-line representation for each symbol. -src diff --git a/src/cmd/go/internal/fips140/fips140.go b/src/cmd/go/internal/fips140/fips140.go index 328e06088e3d47e3d4553c52cabd89a1a662f666..7ca0cde5880eeda6faa46774cef092b8d6379a51 100644 --- a/src/cmd/go/internal/fips140/fips140.go +++ b/src/cmd/go/internal/fips140/fips140.go @@ -114,7 +114,11 @@ if Snapshot() { fsys.Bind(Dir(), filepath.Join(cfg.GOROOT, "src/crypto/internal/fips140")) } - if cfg.Experiment.BoringCrypto && Enabled() { + // ExperimentErr != nil if GOEXPERIMENT failed to parse. Typically + // cmd/go main will exit in this case, but it is allowed during + // toolchain selection, as the GOEXPERIMENT may be valid for the + // selected toolchain version. 
+ if cfg.ExperimentErr == nil && cfg.Experiment.BoringCrypto && Enabled() { base.Fatalf("go: cannot use GOFIPS140 with GOEXPERIMENT=boringcrypto") } } diff --git a/src/cmd/internal/doc/main.go b/src/cmd/internal/doc/main.go index fe99ee70bd3a48b59db84e26a4e90f4de9fa1abc..c51fbef5172497c6a1bab7b8409bd69aebea9fea 100644 --- a/src/cmd/internal/doc/main.go +++ b/src/cmd/internal/doc/main.go @@ -227,8 +227,16 @@ vars, err := runCmd(nil, "go", "env", "GOPROXY", "GOMODCACHE") fields := strings.Fields(vars) if err == nil && len(fields) == 2 { goproxy, gomodcache := fields[0], fields[1] - goproxy = "file://" + filepath.Join(gomodcache, "cache", "download") + "," + goproxy - env = append(env, "GOPROXY="+goproxy) + gomodcache = filepath.Join(gomodcache, "cache", "download") + // Convert absolute path to file URL. pkgsite will not accept + // Windows absolute paths because they look like a host:path remote. + // TODO(golang.org/issue/32456): use url.FromFilePath when implemented. + if strings.HasPrefix(gomodcache, "/") { + gomodcache = "file://" + gomodcache + } else { + gomodcache = "file:///" + filepath.ToSlash(gomodcache) + } + env = append(env, "GOPROXY="+gomodcache+","+goproxy) } const version = "v0.0.0-20250608123103-82c52f1754cd" diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 6561362210d796ee6fcdc9df62b4e1c03af11a0e..b1891bac43d858bd00d9c033a72d9380692deddf 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -253,6 +253,12 @@ CgoExports map[string]Sym WasmExports []Sym + // sizeFixups records symbols that we need to fix up the size + // after loading. It is very rarely needed, only for a DATA symbol + // and a BSS symbol with the same name, and the BSS symbol has + // larger size. 
+ sizeFixups []symAndSize + flags uint32 strictDupMsgs int // number of strict-dup warning/errors, when FlagStrictDups is enabled @@ -469,18 +475,17 @@ // // In summary, the "overwrite" variable and the final result are // // new sym old sym result - // --------------------------------------------- + // ------------------------------------------------------- // TEXT BSS new wins // DATA DATA ERROR // DATA lg/eq BSS sm/eq new wins - // DATA small BSS large ERROR - // BSS large DATA small ERROR + // DATA small BSS large merge: new with larger size + // BSS large DATA small merge: old with larger size // BSS large BSS small new wins // BSS sm/eq D/B lg/eq old wins // BSS TEXT old wins oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())] newtyp := sym.AbiSymKindToSymKind[objabi.SymKind(osym.Type())] - oldIsText := oldtyp.IsText() newIsText := newtyp.IsText() oldHasContent := oldr.DataSize(oldli) != 0 newHasContent := r.DataSize(li) != 0 @@ -488,12 +493,28 @@ oldIsBSS := oldtyp.IsData() && !oldHasContent newIsBSS := newtyp.IsData() && !newHasContent switch { case newIsText && oldIsBSS, - newHasContent && oldIsBSS && sz >= oldsz, + newHasContent && oldIsBSS, newIsBSS && oldIsBSS && sz > oldsz: // new symbol overwrites old symbol. l.objSyms[oldi] = objSym{r.objidx, li} - case newIsBSS && (oldsz >= sz || oldIsText): + if oldsz > sz { + // If the BSS symbol has a larger size, expand the data + // symbol's size so access from the BSS side cannot overrun. + // It is hard to modify the symbol size until all Go objects + // (potentially read-only) are loaded, so we record it in + // a fixup table and apply them later. This is very rare. + // One case is a global variable with a Go declaration and an + // assembly definition, which typically have the same size, + // but in ASAN mode the Go declaration has a larger size due + // to the inserted red zone. 
+ l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(oldsz)}) + } + case newIsBSS: // old win, just ignore the new symbol. + if sz > oldsz { + // See the comment above for sizeFixups. + l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(sz)}) + } default: log.Fatalf("duplicated definition of symbol %s, from %s (type %s size %d) and %s (type %s size %d)", name, r.unit.Lib.Pkg, newtyp, sz, oldr.unit.Lib.Pkg, oldtyp, oldsz) } @@ -2277,6 +2298,10 @@ st.preloadSyms(r, hashed64Def) st.preloadSyms(r, hashedDef) st.preloadSyms(r, nonPkgDef) } + for _, sf := range l.sizeFixups { + pp := l.cloneToExternal(sf.sym) + pp.size = int64(sf.size) + } for _, vr := range st.linknameVarRefs { l.checkLinkname(vr.pkg, vr.name, vr.sym) } @@ -2368,7 +2393,6 @@ "crypto/internal/fips140.setIndicator": {"crypto/internal/fips140"}, "crypto/internal/sysrand.fatal": {"crypto/internal/sysrand"}, "crypto/rand.fatal": {"crypto/rand"}, "internal/runtime/maps.errNilAssign": {"internal/runtime/maps"}, - "internal/runtime/maps.typeString": {"internal/runtime/maps"}, "internal/runtime/maps.fatal": {"internal/runtime/maps"}, "internal/runtime/maps.newarray": {"internal/runtime/maps"}, "internal/runtime/maps.newobject": {"internal/runtime/maps"}, @@ -2399,6 +2423,23 @@ "runtime.mapaccess1_faststr": {"runtime"}, "runtime.mapdelete_fast32": {"runtime"}, "runtime.mapdelete_fast64": {"runtime"}, "runtime.mapdelete_faststr": {"runtime"}, + // New internal linknames in Go 1.25 + // Pushed from runtime + "internal/cpu.riscvHWProbe": {"internal/cpu"}, + "internal/runtime/cgroup.throw": {"internal/runtime/cgroup"}, + "internal/runtime/maps.typeString": {"internal/runtime/maps"}, + "internal/synctest.IsInBubble": {"internal/synctest"}, + "internal/synctest.associate": {"internal/synctest"}, + "internal/synctest.disassociate": {"internal/synctest"}, + "internal/synctest.isAssociated": {"internal/synctest"}, + "runtime/trace.runtime_readTrace": {"runtime/trace"}, + 
"runtime/trace.runtime_traceClockUnitsPerSecond": {"runtime/trace"}, + "sync_test.runtime_blockUntilEmptyCleanupQueue": {"sync_test"}, + "time.runtimeIsBubbled": {"time"}, + "unique.runtime_blockUntilEmptyCleanupQueue": {"unique"}, + // Others + "net.newWindowsFile": {"net"}, // pushed from os + "testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing } // check if a linkname reference to symbol s from pkg is allowed @@ -2482,7 +2523,7 @@ // is that if the linker decides it wants to update the contents of // a symbol originally discovered as part of an object file, it's // easier to do this if we make the updates to an external symbol // payload. -func (l *Loader) cloneToExternal(symIdx Sym) { +func (l *Loader) cloneToExternal(symIdx Sym) *extSymPayload { if l.IsExternal(symIdx) { panic("sym is already external, no need for clone") } @@ -2534,6 +2575,8 @@ // Some attributes were encoded in the object file. Copy them over. l.SetAttrDuplicateOK(symIdx, r.Sym(li).Dupok()) l.SetAttrShared(symIdx, r.Shared()) + + return pp } // Copy the payload of symbol src to dst. 
Both src and dst must be external diff --git a/src/cmd/trace/gen.go b/src/cmd/trace/gen.go index 4455f830461d4809ec33eaaae4f599bc3be782ad..9cc22df1f68f413c9975037995158e2bf519eb70 100644 --- a/src/cmd/trace/gen.go +++ b/src/cmd/trace/gen.go @@ -283,11 +283,11 @@ func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *trace.Event) { m := ev.Metric() switch m.Name { case "/memory/classes/heap/objects:bytes": - ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.ToUint64()) + ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64()) case "/gc/heap/goal:bytes": - ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.ToUint64()) + ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64()) case "/sched/gomaxprocs:threads": - ctx.Gomaxprocs(m.Value.ToUint64()) + ctx.Gomaxprocs(m.Value.Uint64()) } } diff --git a/src/context/context_test.go b/src/context/context_test.go index 57066c968596f631f63b811f35e84d97b3570ca6..ad47f853dd4b3620df67b12dcc554524546d9a74 100644 --- a/src/context/context_test.go +++ b/src/context/context_test.go @@ -5,7 +5,7 @@ package context // Tests in package context cannot depend directly on package testing due to an import cycle. -// If your test does requires access to unexported members of the context package, +// If your test requires access to unexported members of the context package, // add your test below as `func XTestFoo(t testingT)` and add a `TestFoo` to x_test.go // that calls it. Otherwise, write a regular test in a test.go file in package context_test. diff --git a/src/crypto/cipher/gcm.go b/src/crypto/cipher/gcm.go index 5580f96d55a0fb7e5766992e7fcce77d8afda696..73493f6cd2311b02fdc5edc813579fe17c6171c7 100644 --- a/src/crypto/cipher/gcm.go +++ b/src/crypto/cipher/gcm.go @@ -82,7 +82,7 @@ } // NewGCMWithRandomNonce returns the given cipher wrapped in Galois Counter // Mode, with randomly-generated nonces. The cipher must have been created by -// [aes.NewCipher]. +// [crypto/aes.NewCipher]. 
// // It generates a random 96-bit nonce, which is prepended to the ciphertext by Seal, // and is extracted from the ciphertext by Open. The NonceSize of the AEAD is zero, diff --git a/src/crypto/ecdsa/ecdsa.go b/src/crypto/ecdsa/ecdsa.go index 9affc1ff7861256be9b2754891f31e51dc1fbcec..340edbbaba78072424eef49e27016225f1efd8ec 100644 --- a/src/crypto/ecdsa/ecdsa.go +++ b/src/crypto/ecdsa/ecdsa.go @@ -48,9 +48,9 @@ // Modifying the raw coordinates can produce invalid keys, and may // invalidate internal optimizations; moreover, [big.Int] methods are not // suitable for operating on cryptographic values. To encode and decode // PublicKey values, use [PublicKey.Bytes] and [ParseUncompressedPublicKey] - // or [x509.MarshalPKIXPublicKey] and [x509.ParsePKIXPublicKey]. For ECDH, - // use [crypto/ecdh]. For lower-level elliptic curve operations, use a - // third-party module like filippo.io/nistec. + // or [crypto/x509.MarshalPKIXPublicKey] and [crypto/x509.ParsePKIXPublicKey]. + // For ECDH, use [crypto/ecdh]. For lower-level elliptic curve operations, + // use a third-party module like filippo.io/nistec. // // These fields will be deprecated in Go 1.26. X, Y *big.Int @@ -104,7 +104,7 @@ // [ecdh.Curve.NewPublicKey] does for NIST curves, but returns a [PublicKey] // instead of an [ecdh.PublicKey]. // // Note that public keys are more commonly encoded in DER (or PEM) format, which -// can be parsed with [x509.ParsePKIXPublicKey] (and [encoding/pem]). +// can be parsed with [crypto/x509.ParsePKIXPublicKey] (and [encoding/pem]). func ParseUncompressedPublicKey(curve elliptic.Curve, data []byte) (*PublicKey, error) { if len(data) < 1 || data[0] != 4 { return nil, errors.New("ecdsa: invalid uncompressed public key") @@ -141,7 +141,7 @@ // // Bytes returns the same format as [ecdh.PublicKey.Bytes] does for NIST curves. // // Note that public keys are more commonly encoded in DER (or PEM) format, which -// can be generated with [x509.MarshalPKIXPublicKey] (and [encoding/pem]). 
+// can be generated with [crypto/x509.MarshalPKIXPublicKey] (and [encoding/pem]). func (pub *PublicKey) Bytes() ([]byte, error) { switch pub.Curve { case elliptic.P224(): @@ -174,8 +174,8 @@ // // Modifying the raw value can produce invalid keys, and may // invalidate internal optimizations; moreover, [big.Int] methods are not // suitable for operating on cryptographic values. To encode and decode - // PrivateKey values, use [PrivateKey.Bytes] and [ParseRawPrivateKey] - // or [x509.MarshalPKCS8PrivateKey] and [x509.ParsePKCS8PrivateKey]. + // PrivateKey values, use [PrivateKey.Bytes] and [ParseRawPrivateKey] or + // [crypto/x509.MarshalPKCS8PrivateKey] and [crypto/x509.ParsePKCS8PrivateKey]. // For ECDH, use [crypto/ecdh]. // // This field will be deprecated in Go 1.26. @@ -244,8 +244,8 @@ // ParseRawPrivateKey accepts the same format as [ecdh.Curve.NewPrivateKey] does // for NIST curves, but returns a [PrivateKey] instead of an [ecdh.PrivateKey]. // // Note that private keys are more commonly encoded in ASN.1 or PKCS#8 format, -// which can be parsed with [x509.ParseECPrivateKey] or -// [x509.ParsePKCS8PrivateKey] (and [encoding/pem]). +// which can be parsed with [crypto/x509.ParseECPrivateKey] or +// [crypto/x509.ParsePKCS8PrivateKey] (and [encoding/pem]). func ParseRawPrivateKey(curve elliptic.Curve, data []byte) (*PrivateKey, error) { switch curve { case elliptic.P224(): @@ -283,8 +283,8 @@ // // Bytes returns the same format as [ecdh.PrivateKey.Bytes] does for NIST curves. // // Note that private keys are more commonly encoded in ASN.1 or PKCS#8 format, -// which can be generated with [x509.MarshalECPrivateKey] or -// [x509.MarshalPKCS8PrivateKey] (and [encoding/pem]). +// which can be generated with [crypto/x509.MarshalECPrivateKey] or +// [crypto/x509.MarshalPKCS8PrivateKey] (and [encoding/pem]). 
func (priv *PrivateKey) Bytes() ([]byte, error) { switch priv.Curve { case elliptic.P224(): diff --git a/src/crypto/hmac/hmac_test.go b/src/crypto/hmac/hmac_test.go index 9b7eee7bf7873e70e88bfa2e88fc2ba1e0ad2cd5..4046a9555a8e35fc9e7f16561d86a8d5430a2547 100644 --- a/src/crypto/hmac/hmac_test.go +++ b/src/crypto/hmac/hmac_test.go @@ -11,6 +11,7 @@ "crypto/md5" "crypto/sha1" "crypto/sha256" "crypto/sha512" + "errors" "fmt" "hash" "testing" @@ -580,6 +581,18 @@ if j == 1 { h = New(func() hash.Hash { return justHash{tt.hash()} }, tt.key) } } + } +} + +func TestNoClone(t *testing.T) { + h := New(func() hash.Hash { return justHash{sha256.New()} }, []byte("key")) + if _, ok := h.(hash.Cloner); !ok { + t.Skip("no Cloner support") + } + h.Write([]byte("test")) + _, err := h.(hash.Cloner).Clone() + if !errors.Is(err, errors.ErrUnsupported) { + t.Errorf("Clone() = %v, want ErrUnsupported", err) } } diff --git a/src/crypto/internal/fips140/hmac/hmac.go b/src/crypto/internal/fips140/hmac/hmac.go index 9b28017662564b126f31c2028510151341b3a7a5..a18b22650d105b763cd0f8ed4ae877f50edc23ff 100644 --- a/src/crypto/internal/fips140/hmac/hmac.go +++ b/src/crypto/internal/fips140/hmac/hmac.go @@ -130,26 +130,36 @@ h.opad = omarshal h.marshaled = true } +type errCloneUnsupported struct{} + +func (e errCloneUnsupported) Error() string { + return "crypto/hmac: hash does not support hash.Cloner" +} + +func (e errCloneUnsupported) Unwrap() error { + return errors.ErrUnsupported +} + // Clone implements [hash.Cloner] if the underlying hash does. -// Otherwise, it returns [errors.ErrUnsupported]. +// Otherwise, it returns an error wrapping [errors.ErrUnsupported]. 
func (h *HMAC) Clone() (hash.Cloner, error) { r := *h ic, ok := h.inner.(hash.Cloner) if !ok { - return nil, errors.ErrUnsupported + return nil, errCloneUnsupported{} } oc, ok := h.outer.(hash.Cloner) if !ok { - return nil, errors.ErrUnsupported + return nil, errCloneUnsupported{} } var err error r.inner, err = ic.Clone() if err != nil { - return nil, errors.ErrUnsupported + return nil, errCloneUnsupported{} } r.outer, err = oc.Clone() if err != nil { - return nil, errors.ErrUnsupported + return nil, errCloneUnsupported{} } return &r, nil } diff --git a/src/crypto/internal/fips140/subtle/xor_arm.s b/src/crypto/internal/fips140/subtle/xor_arm.s deleted file mode 100644 index a9e4267a6b75387ba7cf1e1f7b5d41f9f105145e..0000000000000000000000000000000000000000 --- a/src/crypto/internal/fips140/subtle/xor_arm.s +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVW dst+0(FP), R0 - MOVW a+4(FP), R1 - MOVW b+8(FP), R2 - MOVW n+12(FP), R3 - -xor_32_check: - CMP $32, R3 - BLT xor_16_check -xor_32_loop: - MOVW (R1), R4 - MOVW 4(R1), R5 - MOVW 8(R1), R6 - MOVW (R2), R7 - MOVW 4(R2), R8 - MOVW 8(R2), R9 - EOR R4, R7 - EOR R5, R8 - EOR R6, R9 - MOVW R7, (R0) - MOVW R8, 4(R0) - MOVW R9, 8(R0) - - MOVW 12(R1), R4 - MOVW 16(R1), R5 - MOVW 20(R1), R6 - MOVW 12(R2), R7 - MOVW 16(R2), R8 - MOVW 20(R2), R9 - EOR R4, R7 - EOR R5, R8 - EOR R6, R9 - MOVW R7, 12(R0) - MOVW R8, 16(R0) - MOVW R9, 20(R0) - - MOVW 24(R1), R4 - MOVW 28(R1), R5 - MOVW 24(R2), R6 - MOVW 28(R2), R7 - EOR R4, R6 - EOR R5, R7 - MOVW R6, 24(R0) - MOVW R7, 28(R0) - - ADD $32, R1 - ADD $32, R2 - ADD $32, R0 - SUB $32, R3 - CMP $32, R3 - BGE xor_32_loop - CMP $0, R3 - BEQ end - -xor_16_check: - CMP $16, R3 - BLT xor_8_check -xor_16: - MOVW (R1), R4 - MOVW 4(R1), R5 - MOVW (R2), R6 - MOVW 4(R2), R7 - EOR R4, R6 - EOR R5, R7 - MOVW R6, (R0) - MOVW R7, 4(R0) - - MOVW 8(R1), R4 - MOVW 12(R1), R5 - MOVW 8(R2), R6 - MOVW 12(R2), R7 - EOR R4, R6 - EOR R5, R7 - MOVW R6, 8(R0) - MOVW R7, 12(R0) - ADD $16, R1 - ADD $16, R2 - ADD $16, R0 - SUB $16, R3 - CMP $0, R3 - BEQ end - -xor_8_check: - CMP $8, R3 - BLT xor_4_check -xor_8: - MOVW (R1), R4 - MOVW 4(R1), R5 - MOVW (R2), R6 - MOVW 4(R2), R7 - EOR R4, R6 - EOR R5, R7 - MOVW R6, (R0) - MOVW R7, 4(R0) - - ADD $8, R0 - ADD $8, R1 - ADD $8, R2 - SUB $8, R3 - CMP $0, R3 - BEQ end - -xor_4_check: - CMP $4, R3 - BLT xor_2_check -xor_4: - MOVW (R1), R4 - MOVW (R2), R5 - EOR R4, R5 - MOVW R5, (R0) - ADD $4, R1 - ADD $4, R2 - ADD $4, R0 - SUB $4, R3 - CMP $0, R3 - BEQ end - -xor_2_check: - CMP $2, R3 - BLT xor_1 -xor_2: - MOVH (R1), R4 - MOVH (R2), R5 - EOR R4, R5 - MOVH R5, (R0) - ADD $2, R1 - ADD $2, R2 - ADD $2, R0 - SUB $2, R3 - CMP $0, R3 - BEQ end - -xor_1: - MOVB (R1), R4 - MOVB (R2), R5 - EOR R4, R5 
- MOVB R5, (R0) - -end: - RET diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index 00f3497a028e28a3f330d9a572d4093eeb47f496..b07239da3e31c15c753048d391f0141c355d188c 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (amd64 || arm || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index 06d69ca91fdcaafb061376b39afb94ce66ac03c7..ed484bc630e98d00ccd77d64bf5031c91c67a3fa 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (!amd64 && !arm && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/tls/auth.go b/src/crypto/tls/auth.go index f5de7b306940df18f854d46ec0ae8c281901b170..7169e471056afa38820dab913f32e6d296f970ec 100644 --- a/src/crypto/tls/auth.go +++ b/src/crypto/tls/auth.go @@ -163,73 +163,64 @@ {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11}, {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11}, } -// signatureSchemesForCertificate returns the list of supported SignatureSchemes -// for a given certificate, based on the public key and the protocol version, -// and optionally filtered by its explicit SupportedSignatureAlgorithms. -func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme { - priv, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return nil - } - - var sigAlgs []SignatureScheme - switch pub := priv.Public().(type) { +func signatureSchemesForPublicKey(version uint16, pub crypto.PublicKey) []SignatureScheme { + switch pub := pub.(type) { case *ecdsa.PublicKey: - if version != VersionTLS13 { + if version < VersionTLS13 { // In TLS 1.2 and earlier, ECDSA algorithms are not // constrained to a single curve. 
- sigAlgs = []SignatureScheme{ + return []SignatureScheme{ ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512, ECDSAWithSHA1, } - break } switch pub.Curve { case elliptic.P256(): - sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256} + return []SignatureScheme{ECDSAWithP256AndSHA256} case elliptic.P384(): - sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384} + return []SignatureScheme{ECDSAWithP384AndSHA384} case elliptic.P521(): - sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512} + return []SignatureScheme{ECDSAWithP521AndSHA512} default: return nil } case *rsa.PublicKey: size := pub.Size() - sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes)) + sigAlgs := make([]SignatureScheme, 0, len(rsaSignatureSchemes)) for _, candidate := range rsaSignatureSchemes { if size >= candidate.minModulusBytes { sigAlgs = append(sigAlgs, candidate.scheme) } } + return sigAlgs case ed25519.PublicKey: - sigAlgs = []SignatureScheme{Ed25519} + return []SignatureScheme{Ed25519} default: return nil } +} - if cert.SupportedSignatureAlgorithms != nil { - sigAlgs = slices.DeleteFunc(sigAlgs, func(sigAlg SignatureScheme) bool { - return !isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) +// selectSignatureScheme picks a SignatureScheme from the peer's preference list +// that works with the selected certificate. It's only called for protocol +// versions that support signature algorithms, so TLS 1.2 and 1.3. 
+func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) { + priv, ok := c.PrivateKey.(crypto.Signer) + if !ok { + return 0, unsupportedCertificateError(c) + } + supportedAlgs := signatureSchemesForPublicKey(vers, priv.Public()) + if c.SupportedSignatureAlgorithms != nil { + supportedAlgs = slices.DeleteFunc(supportedAlgs, func(sigAlg SignatureScheme) bool { + return !isSupportedSignatureAlgorithm(sigAlg, c.SupportedSignatureAlgorithms) }) } - // Filter out any unsupported signature algorithms, for example due to // FIPS 140-3 policy, tlssha1=0, or protocol version. - sigAlgs = slices.DeleteFunc(sigAlgs, func(sigAlg SignatureScheme) bool { - return isDisabledSignatureAlgorithm(version, sigAlg, false) + supportedAlgs = slices.DeleteFunc(supportedAlgs, func(sigAlg SignatureScheme) bool { + return isDisabledSignatureAlgorithm(vers, sigAlg, false) }) - - return sigAlgs -} - -// selectSignatureScheme picks a SignatureScheme from the peer's preference list -// that works with the selected certificate. It's only called for protocol -// versions that support signature algorithms, so TLS 1.2 and 1.3. -func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) { - supportedAlgs := signatureSchemesForCertificate(vers, c) if len(supportedAlgs) == 0 { return 0, unsupportedCertificateError(c) } diff --git a/src/crypto/tls/bogo_config.json b/src/crypto/tls/bogo_config.json index 9e3990ecb56e2aa878cdb2c95daea5e8d38e9f6d..cf316718c80e892fd1a366e4631420fc6690437b 100644 --- a/src/crypto/tls/bogo_config.json +++ b/src/crypto/tls/bogo_config.json @@ -49,8 +49,6 @@ "Agree-Digest-SHA1": "We don't support SHA-1 in TLS 1.2 (without tlssha1=1)", "ServerAuth-SHA1-Fallback*": "We don't support SHA-1 in TLS 1.2 (without tlssha1=1), so we fail if there are no signature_algorithms", "Agree-Digest-SHA256": "We select signature algorithms in peer preference order. 
We should consider changing this.", - "ECDSACurveMismatch-Verify-TLS13": "We don't enforce the curve when verifying. This is a bug. We need to fix this.", - "*-Verify-ECDSA_P224_SHA256-TLS13": "Side effect of the bug above. BoGo sends a P-256 sigAlg with a P-224 key, and we allow it.", "V2ClientHello-*": "We don't support SSLv2", "SendV2ClientHello*": "We don't support SSLv2", @@ -74,6 +72,9 @@ "BadRSAClientKeyExchange-4": "crypto/tls doesn't check the version number in the premaster secret - see processClientKeyExchange comment", "BadRSAClientKeyExchange-5": "crypto/tls doesn't check the version number in the premaster secret - see processClientKeyExchange comment", "SupportTicketsWithSessionID": "We don't support session ID resumption", "ResumeTLS12SessionID-TLS13": "We don't support session ID resumption", + "TrustAnchors-*": "We don't support draft-beck-tls-trust-anchor-ids", + "PAKE-Extension-*": "We don't support PAKE", + "*TicketFlags": "We don't support draft-ietf-tls-tlsflags", "CheckLeafCurve": "TODO: first pass, this should be fixed", "KeyUpdate-RequestACK": "TODO: first pass, this should be fixed", @@ -206,7 +207,14 @@ "EarlyData-SkipEndOfEarlyData-TLS13": "TODO: first pass, this should be fixed", "EarlyData-Server-BadFinished-TLS13": "TODO: first pass, this should be fixed", "EarlyData-UnexpectedHandshake-Server-TLS13": "TODO: first pass, this should be fixed", "EarlyData-CipherMismatch-Client-TLS13": "TODO: first pass, this should be fixed", - "Resume-Server-UnofferedCipher-TLS13": "TODO: first pass, this should be fixed" + + "ServerNameExtensionServer-TLS-*": "https://github.com/golang/go/issues/74282", + + "Resume-Server-UnofferedCipher-TLS13": "TODO: first pass, this should be fixed", + "GarbageCertificate-Server-TLS13": "TODO: 2025/06 BoGo update, should be fixed", + "WrongMessageType-TLS13-ClientCertificate-TLS": "TODO: 2025/06 BoGo update, should be fixed", + "KeyUpdate-Requested": "TODO: 2025/06 BoGo update, should be fixed", + 
"AppDataBeforeTLS13KeyChange-*": "TODO: 2025/06 BoGo update, should be fixed" }, "AllCurves": [ 23, @@ -216,6 +224,6 @@ 29, 4588 ], "ErrorMap": { - ":ECH_REJECTED:": "tls: server rejected ECH" + ":ECH_REJECTED:": ["tls: server rejected ECH"] } } diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index 2e88d539c4d11921f5dbf8288befe93ea97ffb9b..7cab568db80953b03e524376ad2f9c5c9f0a2844 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -420,6 +420,12 @@ break } } if err != io.EOF { + // Flush the TLS conn and then perform a graceful shutdown of the + // TCP connection to avoid the runner side hitting an unexpected + // write error before it has processed the alert we may have + // generated for the error condition. + orderlyShutdown(tlsConn) + retryErr, ok := err.(*ECHRejectionError) if !ok { log.Fatal(err) @@ -505,6 +511,31 @@ } } } +// If the test case produces an error, we don't want to immediately close the +// TCP connection after generating an alert. The runner side may try to write +// additional data to the connection before it reads the alert. If the conn +// has already been torn down, then these writes will produce an unexpected +// broken pipe err and fail the test. +func orderlyShutdown(tlsConn *Conn) { + // Flush any pending alert data + tlsConn.flush() + + netConn := tlsConn.NetConn() + tcpConn := netConn.(*net.TCPConn) + tcpConn.CloseWrite() + + // Read and discard any data that was sent by the peer. 
+ buf := make([]byte, maxPlaintext) + for { + n, err := tcpConn.Read(buf) + if n == 0 || err != nil { + break + } + } + + tcpConn.CloseRead() +} + func TestBogoSuite(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") @@ -526,7 +557,7 @@ var bogoDir string if *bogoLocalDir != "" { bogoDir = *bogoLocalDir } else { - const boringsslModVer = "v0.0.0-20241120195446-5cce3fbd23e1" + const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer) } diff --git a/src/crypto/tls/handshake_client_tls13.go b/src/crypto/tls/handshake_client_tls13.go index 4f4966904f59f450a4e4b356e8567f1cd80079d3..7018bb2336b8f3f01cab57c14ad3abc23c64b590 100644 --- a/src/crypto/tls/handshake_client_tls13.go +++ b/src/crypto/tls/handshake_client_tls13.go @@ -677,7 +677,8 @@ // See RFC 8446, Section 4.4.3. // We don't use hs.hello.supportedSignatureAlgorithms because it might // include PKCS#1 v1.5 and SHA-1 if the ClientHello also supported TLS 1.2. - if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms(c.vers)) { + if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms(c.vers)) || + !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, signatureSchemesForPublicKey(c.vers, c.peerCertificates[0].PublicKey)) { c.sendAlert(alertIllegalParameter) return errors.New("tls: certificate used with invalid signature algorithm") } diff --git a/src/crypto/tls/handshake_server_tls13.go b/src/crypto/tls/handshake_server_tls13.go index dbd6ff2c4f4d9465ffcdaa5714ab51d9988b9479..a52bc76a0d1a9feafcb8b52ca70e2601a6c91d2c 100644 --- a/src/crypto/tls/handshake_server_tls13.go +++ b/src/crypto/tls/handshake_server_tls13.go @@ -1098,7 +1098,8 @@ // See RFC 8446, Section 4.4.3. // We don't use certReq.supportedSignatureAlgorithms because it would // require keeping the certificateRequestMsgTLS13 around in the hs. 
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms(c.vers)) { + if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms(c.vers)) || + !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, signatureSchemesForPublicKey(c.vers, c.peerCertificates[0].PublicKey)) { c.sendAlert(alertIllegalParameter) return errors.New("tls: client certificate used with invalid signature algorithm") } diff --git a/src/encoding/json/bench_test.go b/src/encoding/json/bench_test.go index cd55ceed90133ce29f6c3eeff29069a70019bcd7..047188131cecf2619a6113803b2bd825ac1c9e22 100644 --- a/src/encoding/json/bench_test.go +++ b/src/encoding/json/bench_test.go @@ -14,9 +14,9 @@ package json import ( "bytes" - "compress/gzip" "fmt" "internal/testenv" + "internal/zstd" "io" "os" "reflect" @@ -46,15 +46,12 @@ var codeJSON []byte var codeStruct codeResponse func codeInit() { - f, err := os.Open("testdata/code.json.gz") + f, err := os.Open("internal/jsontest/testdata/golang_source.json.zst") if err != nil { panic(err) } defer f.Close() - gz, err := gzip.NewReader(f) - if err != nil { - panic(err) - } + gz := zstd.NewReader(f) data, err := io.ReadAll(gz) if err != nil { panic(err) diff --git a/src/encoding/json/decode.go b/src/encoding/json/decode.go index 4e195e0948daa28c50f106540da32f1aa0c4a480..70885a517e1876428258e0347170f5b2357c783a 100644 --- a/src/encoding/json/decode.go +++ b/src/encoding/json/decode.go @@ -43,11 +43,14 @@ // Otherwise, if the value implements [encoding.TextUnmarshaler] // and the input is a JSON quoted string, Unmarshal calls // [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string. // -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by [Marshal] (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. 
By -// default, object keys which don't have a corresponding struct field are -// ignored (see [Decoder.DisallowUnknownFields] for an alternative). +// To unmarshal JSON into a struct, Unmarshal matches incoming object keys to +// the keys used by [Marshal] (either the struct field name or its tag), +// ignoring case. If multiple struct fields match an object key, an exact case +// match is preferred over a case-insensitive one. +// +// Incoming object members are processed in the order observed. If an object +// includes duplicate keys, later duplicates will replace or be merged into +// prior values. // // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index 5bc3d3c856413909244503ebc67279d52516b69c..473fd02833016d0cf11880bc66857f4abc06f91e 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -1189,6 +1189,27 @@ ptr: new([]int), out: []int{1, 2, 0, 4, 5}, err: &UnmarshalTypeError{Value: "bool", Type: reflect.TypeFor[int](), Offset: 9}, }, + + { + CaseName: Name("DashComma"), + in: `{"-":"hello"}`, + ptr: new(struct { + F string `json:"-,"` + }), + out: struct { + F string `json:"-,"` + }{"hello"}, + }, + { + CaseName: Name("DashCommaOmitEmpty"), + in: `{"-":"hello"}`, + ptr: new(struct { + F string `json:"-,omitempty"` + }), + out: struct { + F string `json:"-,omitempty"` + }{"hello"}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index 1992e7372ecdd64b48b48000f176ebb4f55643b5..29fcc91fd7b0dabaa6d688a2623e5cedf2a37a66 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -4,12 +4,44 @@ // license that can be found in the LICENSE file. //go:build !goexperiment.jsonv2 -// Package json implements encoding and decoding of JSON as defined in -// RFC 7159. 
The mapping between JSON and Go values is described -// in the documentation for the Marshal and Unmarshal functions. +// Package json implements encoding and decoding of JSON as defined in RFC 7159. +// The mapping between JSON and Go values is described in the documentation for +// the Marshal and Unmarshal functions. // // See "JSON and Go" for an introduction to this package: // https://golang.org/doc/articles/json_and_go.html +// +// # Security Considerations +// +// The JSON standard (RFC 7159) is lax in its definition of a number of parser +// behaviors. As such, many JSON parsers behave differently in various +// scenarios. These differences in parsers mean that systems that use multiple +// independent JSON parser implementations may parse the same JSON object in +// differing ways. +// +// Systems that rely on a JSON object being parsed consistently for security +// purposes should be careful to understand the behaviors of this parser, as +// well as how these behaviors may cause interoperability issues with other +// parser implementations. +// +// Due to the Go Backwards Compatibility promise (https://go.dev/doc/go1compat) +// there are a number of behaviors this package exhibits that may cause +// interopability issues, but cannot be changed. In particular the following +// parsing behaviors may cause issues: +// +// - If a JSON object contains duplicate keys, keys are processed in the order +// they are observed, meaning later values will replace or be merged into +// prior values, depending on the field type (in particular maps and structs +// will have values merged, while other types have values replaced). +// - When parsing a JSON object into a Go struct, keys are considered in a +// case-insensitive fashion. +// - When parsing a JSON object into a Go struct, unknown keys in the JSON +// object are ignored (unless a [Decoder] is used and +// [Decoder.DisallowUnknownFields] has been called). 
+// - Invalid UTF-8 bytes in JSON strings are replaced by the Unicode +// replacement character. +// - Large JSON number integers will lose precision when unmarshaled into +// floating-point types. package json import ( diff --git a/src/encoding/json/internal/jsonopts/options.go b/src/encoding/json/internal/jsonopts/options.go index 2226830b6bcd9955b759bb31e390b5693d9c09b5..e4c3f47d36adc8f686c79dccef897d05d5999465 100644 --- a/src/encoding/json/internal/jsonopts/options.go +++ b/src/encoding/json/internal/jsonopts/options.go @@ -65,7 +65,7 @@ func (*Struct) JSONOptions(internal.NotForPublicUse) {} // GetUnknownOption is injected by the "json" package to handle Options // declared in that package so that "jsonopts" can handle them. -var GetUnknownOption = func(*Struct, Options) (any, bool) { panic("unknown option") } +var GetUnknownOption = func(Struct, Options) (any, bool) { panic("unknown option") } func GetOption[T any](opts Options, setter func(T) Options) (T, bool) { // Collapse the options to *Struct to simplify lookup. @@ -104,14 +104,14 @@ return zero, false } return any(structOpts.DepthLimit).(T), true default: - v, ok := GetUnknownOption(structOpts, opt) + v, ok := GetUnknownOption(*structOpts, opt) return v.(T), ok } } // JoinUnknownOption is injected by the "json" package to handle Options // declared in that package so that "jsonopts" can handle them. -var JoinUnknownOption = func(*Struct, Options) { panic("unknown option") } +var JoinUnknownOption = func(Struct, Options) Struct { panic("unknown option") } func (dst *Struct) Join(srcs ...Options) { dst.join(false, srcs...) 
@@ -182,7 +182,7 @@ dst.Unmarshalers = src.Unmarshalers } } default: - JoinUnknownOption(dst, src) + *dst = JoinUnknownOption(*dst, src) } } } diff --git a/src/encoding/json/jsontext/doc.go b/src/encoding/json/jsontext/doc.go index 755305151fb80e1702f71594a623a157393e58c7..8e4bced015d0000077402de9034b4925d14a910f 100644 --- a/src/encoding/json/jsontext/doc.go +++ b/src/encoding/json/jsontext/doc.go @@ -10,6 +10,11 @@ // JSON is a simple data interchange format that can represent // primitive data types such as booleans, strings, and numbers, // in addition to structured data types such as objects and arrays. // +// This package (encoding/json/jsontext) is experimental, +// and not subject to the Go 1 compatibility promise. +// It only exists when building with the GOEXPERIMENT=jsonv2 environment variable set. +// Most users should use [encoding/json]. +// // The [Encoder] and [Decoder] types are used to encode or decode // a stream of JSON tokens or values. // @@ -20,8 +25,8 @@ // // - a JSON literal (i.e., null, true, or false) // - a JSON string (e.g., "hello, world!") // - a JSON number (e.g., 123.456) -// - a start or end delimiter for a JSON object (i.e., '{' or '}') -// - a start or end delimiter for a JSON array (i.e., '[' or ']') +// - a begin or end delimiter for a JSON object (i.e., '{' or '}') +// - a begin or end delimiter for a JSON array (i.e., '[' or ']') // // A JSON token is represented by the [Token] type in Go. Technically, // there are two additional structural characters (i.e., ':' and ','), diff --git a/src/encoding/json/jsontext/encode.go b/src/encoding/json/jsontext/encode.go index a1e6307adc8bda4ac01985232657e325f0746faf..562d217fef7cbf766e232a2cd187d3699832afde 100644 --- a/src/encoding/json/jsontext/encode.go +++ b/src/encoding/json/jsontext/encode.go @@ -74,8 +74,8 @@ wr io.Writer // maxValue is the approximate maximum Value size passed to WriteValue. maxValue int - // unusedCache is the buffer returned by the UnusedBuffer method. 
- unusedCache []byte + // availBuffer is the buffer returned by the AvailableBuffer method. + availBuffer []byte // always has zero length // bufStats is statistics about buffer utilization. // It is only used with pooled encoders in pools.go. bufStats bufferStatistics @@ -114,7 +114,7 @@ func (e *encoderState) reset(b []byte, w io.Writer, opts ...Options) { e.state.reset() e.encodeBuffer = encodeBuffer{Buf: b, wr: w, bufStats: e.bufStats} if bb, ok := w.(*bytes.Buffer); ok && bb != nil { - e.Buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + e.Buf = bb.AvailableBuffer() // alias the unused buffer of bb } opts2 := jsonopts.Struct{} // avoid mutating e.Struct in case it is part of opts opts2.Join(opts...) @@ -465,9 +465,9 @@ // copy it to a scratch buffer and then escape it back. isVerbatim := safeASCII || !jsonwire.NeedEscape(b[pos+len(`"`):len(b)-len(`"`)]) if !isVerbatim { var err error - b2 := append(e.unusedCache, b[pos+len(`"`):len(b)-len(`"`)]...) + b2 := append(e.availBuffer, b[pos+len(`"`):len(b)-len(`"`)]...) b, err = jsonwire.AppendQuote(b[:pos], string(b2), &e.Flags) - e.unusedCache = b2[:0] + e.availBuffer = b2[:0] if err != nil { return wrapSyntacticError(e, err, pos, +1) } @@ -713,7 +713,7 @@ // reformatObject parses a JSON object from the start of src and // appends it to the end of src, reformatting whitespace and strings as needed. // It returns the extended dst buffer and the number of consumed input bytes. func (e *encoderState) reformatObject(dst []byte, src Value, depth int) ([]byte, int, error) { - // Append object start. + // Append object begin. if len(src) == 0 || src[0] != '{' { panic("BUG: reformatObject must be called with a buffer that starts with '{'") } else if depth == maxNestingDepth+1 { @@ -824,7 +824,7 @@ // reformatArray parses a JSON array from the start of src and // appends it to the end of dst, reformatting whitespace and strings as needed. 
// It returns the extended dst buffer and the number of consumed input bytes. func (e *encoderState) reformatArray(dst []byte, src Value, depth int) ([]byte, int, error) { - // Append array start. + // Append array begin. if len(src) == 0 || src[0] != '[' { panic("BUG: reformatArray must be called with a buffer that starts with '['") } else if depth == maxNestingDepth+1 { @@ -900,20 +900,20 @@ func (e *Encoder) OutputOffset() int64 { return e.s.previousOffsetEnd() } -// UnusedBuffer returns a zero-length buffer with a possible non-zero capacity. +// AvailableBuffer returns a zero-length buffer with a possible non-zero capacity. // This buffer is intended to be used to populate a [Value] // being passed to an immediately succeeding [Encoder.WriteValue] call. // // Example usage: // -// b := d.UnusedBuffer() +// b := d.AvailableBuffer() // b = append(b, '"') // b = appendString(b, v) // append the string formatting of v // b = append(b, '"') // ... := d.WriteValue(b) // // It is the user's responsibility to ensure that the value is valid JSON. -func (e *Encoder) UnusedBuffer() []byte { +func (e *Encoder) AvailableBuffer() []byte { // NOTE: We don't return e.buf[len(e.buf):cap(e.buf)] since WriteValue would // need to take special care to avoid mangling the data while reformatting. // WriteValue can't easily identify whether the input Value aliases e.buf @@ -921,10 +921,10 @@ // without using unsafe.Pointer. Thus, we just return a different buffer. // Should this ever alias e.buf, we need to consider how it operates with // the specialized performance optimization for bytes.Buffer. n := 1 << bits.Len(uint(e.s.maxValue|63)) // fast approximation for max length - if cap(e.s.unusedCache) < n { - e.s.unusedCache = make([]byte, 0, n) + if cap(e.s.availBuffer) < n { + e.s.availBuffer = make([]byte, 0, n) } - return e.s.unusedCache + return e.s.availBuffer } // StackDepth returns the depth of the state machine for written JSON data. 
diff --git a/src/encoding/json/jsontext/state.go b/src/encoding/json/jsontext/state.go index 1e8b4f22dbf2882c53d16859eef871e5d72ef97a..d214fd5190325e3855780e9ec6c843a48700d15e 100644 --- a/src/encoding/json/jsontext/state.go +++ b/src/encoding/json/jsontext/state.go @@ -297,7 +297,7 @@ func (m *stateMachine) appendNumber() error { return m.appendLiteral() } -// pushObject appends a JSON start object token as next in the sequence. +// pushObject appends a JSON begin object token as next in the sequence. // If an error is returned, the state is not mutated. func (m *stateMachine) pushObject() error { switch { @@ -332,7 +332,7 @@ return nil } } -// pushArray appends a JSON start array token as next in the sequence. +// pushArray appends a JSON begin array token as next in the sequence. // If an error is returned, the state is not mutated. func (m *stateMachine) pushArray() error { switch { diff --git a/src/encoding/json/jsontext/token.go b/src/encoding/json/jsontext/token.go index 22717b154ac48ff493b46f09e71756d8cdf40ab9..e78c3f84d8650fec92a739f7d7016f4b881447c4 100644 --- a/src/encoding/json/jsontext/token.go +++ b/src/encoding/json/jsontext/token.go @@ -33,8 +33,8 @@ // Token represents a lexical JSON token, which may be one of the following: // - a JSON literal (i.e., null, true, or false) // - a JSON string (e.g., "hello, world!") // - a JSON number (e.g., 123.456) -// - a start or end delimiter for a JSON object (i.e., { or } ) -// - a start or end delimiter for a JSON array (i.e., [ or ] ) +// - a begin or end delimiter for a JSON object (i.e., { or } ) +// - a begin or end delimiter for a JSON array (i.e., [ or ] ) // // A Token cannot represent entire array or object values, while a [Value] can. 
// There is no Token to represent commas and colons since @@ -481,9 +481,9 @@ // - 'f': false // - 't': true // - '"': string // - '0': number -// - '{': object start +// - '{': object begin // - '}': object end -// - '[': array start +// - '[': array begin // - ']': array end // // An invalid kind is usually represented using 0, diff --git a/src/encoding/json/testdata/code.json.gz b/src/encoding/json/testdata/code.json.gz deleted file mode 100644 index 1572a92bfbdfe99c28195104740d1f4b1e560f66..0000000000000000000000000000000000000000 Binary files a/src/encoding/json/testdata/code.json.gz and /dev/null differ diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go index 99fcc5bd467d172ead4f00d31fdf9ba009f53f2a..10b16efe4a6a7296cea88230c815649bd3c5b6b0 100644 --- a/src/encoding/json/v2/arshal.go +++ b/src/encoding/json/v2/arshal.go @@ -147,17 +147,23 @@ // formatted in RFC 3339 with nanosecond precision. // If the format matches one of the format constants declared // in the time package (e.g., RFC1123), then that format is used. // If the format is "unix", "unixmilli", "unixmicro", or "unixnano", -// then the timestamp is encoded as a JSON number of the number of seconds -// (or milliseconds, microseconds, or nanoseconds) since the Unix epoch, -// which is January 1st, 1970 at 00:00:00 UTC. +// then the timestamp is encoded as a possibly fractional JSON number +// of the number of seconds (or milliseconds, microseconds, or nanoseconds) +// since the Unix epoch, which is January 1st, 1970 at 00:00:00 UTC. +// To avoid a fractional component, round the timestamp to the relevant unit. // Otherwise, the format is used as-is with [time.Time.Format] if non-empty. // -// - A Go [time.Duration] is encoded as a JSON string containing the duration -// formatted according to [time.Duration.String]. +// - A Go [time.Duration] currently has no default representation and +// requires an explicit format to be specified. 
// If the format is "sec", "milli", "micro", or "nano", -// then the duration is encoded as a JSON number of the number of seconds -// (or milliseconds, microseconds, or nanoseconds) in the duration. -// If the format is "units", it uses [time.Duration.String]. +// then the duration is encoded as a possibly fractional JSON number +// of the number of seconds (or milliseconds, microseconds, or nanoseconds). +// To avoid a fractional component, round the duration to the relevant unit. +// If the format is "units", it is encoded as a JSON string formatted using +// [time.Duration.String] (e.g., "1h30m" for 1 hour 30 minutes). +// If the format is "iso8601", it is encoded as a JSON string using the +// ISO 8601 standard for durations (e.g., "PT1H30M" for 1 hour 30 minutes) +// using only accurate units of hours, minutes, and seconds. // // - All other Go types (e.g., complex numbers, channels, and functions) // have no default representation and result in a [SemanticError]. @@ -375,17 +381,21 @@ // formatted in RFC 3339 with nanosecond precision. // If the format matches one of the format constants declared in // the time package (e.g., RFC1123), then that format is used for parsing. // If the format is "unix", "unixmilli", "unixmicro", or "unixnano", -// then the timestamp is decoded from a JSON number of the number of seconds -// (or milliseconds, microseconds, or nanoseconds) since the Unix epoch, -// which is January 1st, 1970 at 00:00:00 UTC. +// then the timestamp is decoded from an optionally fractional JSON number +// of the number of seconds (or milliseconds, microseconds, or nanoseconds) +// since the Unix epoch, which is January 1st, 1970 at 00:00:00 UTC. // Otherwise, the format is used as-is with [time.Time.Parse] if non-empty. // -// - A Go [time.Duration] is decoded from a JSON string by -// passing the decoded string to [time.ParseDuration]. 
+// - A Go [time.Duration] currently has no default representation and +// requires an explicit format to be specified. // If the format is "sec", "milli", "micro", or "nano", -// then the duration is decoded from a JSON number of the number of seconds -// (or milliseconds, microseconds, or nanoseconds) in the duration. -// If the format is "units", it uses [time.ParseDuration]. +// then the duration is decoded from an optionally fractional JSON number +// of the number of seconds (or milliseconds, microseconds, or nanoseconds). +// If the format is "units", it is decoded from a JSON string parsed using +// [time.ParseDuration] (e.g., "1h30m" for 1 hour 30 minutes). +// If the format is "iso8601", it is decoded from a JSON string using the +// ISO 8601 standard for durations (e.g., "PT1H30M" for 1 hour 30 minutes) +// accepting only accurate units of hours, minutes, or seconds. // // - All other Go types (e.g., complex numbers, channels, and functions) // have no default representation and result in a [SemanticError]. 
diff --git a/src/encoding/json/v2/arshal_inlined.go b/src/encoding/json/v2/arshal_inlined.go index 0b5782fdccc8363749760c50db5c711ee3da7359..6299cc4a428ae17a61bff45ca779cfbfc2f2f9d1 100644 --- a/src/encoding/json/v2/arshal_inlined.go +++ b/src/encoding/json/v2/arshal_inlined.go @@ -113,7 +113,7 @@ } mk := newAddressableValue(m.Type().Key()) mv := newAddressableValue(m.Type().Elem()) marshalKey := func(mk addressableValue) error { - b, err := jsonwire.AppendQuote(enc.UnusedBuffer(), mk.String(), &mo.Flags) + b, err := jsonwire.AppendQuote(enc.AvailableBuffer(), mk.String(), &mo.Flags) if err != nil { return newMarshalErrorBefore(enc, m.Type().Key(), err) } diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go index f1060cccb53a516e695510dae03bf284bc5df768..8494deed03b64651e840b402602c4a20e01387cb 100644 --- a/src/encoding/json/v2/arshal_test.go +++ b/src/encoding/json/v2/arshal_test.go @@ -365,7 +365,7 @@ Array [1]string `json:",omitzero,format:invalid"` Interface any `json:",omitzero,format:invalid"` } structDurationFormat struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:units"` D3 time.Duration `json:",format:sec"` D4 time.Duration `json:",string,format:sec"` @@ -375,6 +375,7 @@ D7 time.Duration `json:",format:micro"` D8 time.Duration `json:",string,format:micro"` D9 time.Duration `json:",format:nano"` D10 time.Duration `json:",string,format:nano"` + D11 time.Duration `json:",format:iso8601"` } structTimeFormat struct { T1 time.Time @@ -4312,14 +4313,14 @@ want: `"called"`, }, { name: jsontest.Name("Duration/Zero"), in: struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. 
D2 time.Duration `json:",format:nano"` }{0, 0}, want: `{"D1":"0s","D2":0}`, }, { name: jsontest.Name("Duration/Positive"), in: struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{ 123456789123456789, @@ -4329,7 +4330,7 @@ want: `{"D1":"34293h33m9.123456789s","D2":123456789123456789}`, }, { name: jsontest.Name("Duration/Negative"), in: struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{ -123456789123456789, @@ -4356,14 +4357,16 @@ }{}, want: `{"D"`, wantErr: EM(errInvalidFormatFlag).withPos(`{"D":`, "/D").withType(0, T[time.Duration]()), }, { + /* TODO(https://go.dev/issue/71631): Re-enable this test case. name: jsontest.Name("Duration/IgnoreInvalidFormat"), opts: []Options{invalidFormatOption}, in: time.Duration(0), want: `"0s"`, - }, { + }, { */ name: jsontest.Name("Duration/Format"), opts: []Options{jsontext.Multiline(true)}, in: structDurationFormat{ + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, @@ -4385,21 +4388,24 @@ "D6": "45296078.090012", "D7": 45296078090.012, "D8": "45296078090.012", "D9": 45296078090012, - "D10": "45296078090012" + "D10": "45296078090012", + "D11": "PT12H34M56.078090012S" }`, }, { + /* TODO(https://go.dev/issue/71631): Re-enable this test case. 
name: jsontest.Name("Duration/Format/Legacy"), opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1}, in: structDurationFormat{ D1: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, D2: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, }, - want: `{"D1":45296078090012,"D2":"12h34m56.078090012s","D3":0,"D4":"0","D5":0,"D6":"0","D7":0,"D8":"0","D9":0,"D10":"0"}`, - }, { + want: `{"D1":45296078090012,"D2":"12h34m56.078090012s","D3":0,"D4":"0","D5":0,"D6":"0","D7":0,"D8":"0","D9":0,"D10":"0","D11":"PT0S"}`, + }, { */ + /* TODO(https://go.dev/issue/71631): Re-enable this test case. name: jsontest.Name("Duration/MapKey"), in: map[time.Duration]string{time.Second: ""}, want: `{"1s":""}`, - }, { + }, { */ name: jsontest.Name("Duration/MapKey/Legacy"), opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1}, in: map[time.Duration]string{time.Second: ""}, @@ -8713,33 +8719,33 @@ }, { name: jsontest.Name("Duration/Null"), inBuf: `{"D1":null,"D2":null}`, inVal: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{1, 1}), want: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{0, 0}), }, { name: jsontest.Name("Duration/Zero"), inBuf: `{"D1":"0s","D2":0}`, inVal: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{1, 1}), want: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. 
D2 time.Duration `json:",format:nano"` }{0, 0}), }, { name: jsontest.Name("Duration/Positive"), inBuf: `{"D1":"34293h33m9.123456789s","D2":123456789123456789}`, inVal: new(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }), want: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{ 123456789123456789, @@ -8749,11 +8755,11 @@ }, { name: jsontest.Name("Duration/Negative"), inBuf: `{"D1":"-34293h33m9.123456789s","D2":-123456789123456789}`, inVal: new(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }), want: addr(struct { - D1 time.Duration + D1 time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. D2 time.Duration `json:",format:nano"` }{ -123456789123456789, @@ -8801,20 +8807,20 @@ }, { name: jsontest.Name("Duration/String/Mismatch"), inBuf: `{"D":-123456789123456789}`, inVal: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. }{1}), want: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. }{1}), wantErr: EU(nil).withPos(`{"D":`, "/D").withType('0', timeDurationType), }, { name: jsontest.Name("Duration/String/Invalid"), inBuf: `{"D":"5minkutes"}`, inVal: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. }{1}), want: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. 
}{1}), wantErr: EU(func() error { _, err := time.ParseDuration("5minkutes") @@ -8824,13 +8830,42 @@ }, { name: jsontest.Name("Duration/Syntax/Invalid"), inBuf: `{"D":x}`, inVal: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. }{1}), want: addr(struct { - D time.Duration + D time.Duration `json:",format:units"` // TODO(https://go.dev/issue/71631): Remove the format flag. }{1}), wantErr: newInvalidCharacterError("x", "at start of value", len64(`{"D":`), "/D"), }, { + name: jsontest.Name("Duration/Format"), + inBuf: `{ + "D1": "12h34m56.078090012s", + "D2": "12h34m56.078090012s", + "D3": 45296.078090012, + "D4": "45296.078090012", + "D5": 45296078.090012, + "D6": "45296078.090012", + "D7": 45296078090.012, + "D8": "45296078090.012", + "D9": 45296078090012, + "D10": "45296078090012", + "D11": "PT12H34M56.078090012S" + }`, + inVal: new(structDurationFormat), + want: addr(structDurationFormat{ + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond 
+ 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, + }), + }, { name: jsontest.Name("Duration/Format/Invalid"), inBuf: `{"D":"0s"}`, inVal: addr(struct { @@ -8841,6 +8876,7 @@ D time.Duration `json:",format:invalid"` }{1}), wantErr: EU(errInvalidFormatFlag).withPos(`{"D":`, "/D").withType(0, timeDurationType), }, { + /* TODO(https://go.dev/issue/71631): Re-enable this test case. name: jsontest.Name("Duration/Format/Legacy"), inBuf: `{"D1":45296078090012,"D2":"12h34m56.078090012s"}`, opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1}, @@ -8849,24 +8885,26 @@ want: addr(structDurationFormat{ D1: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, D2: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond, }), - }, { + }, { */ + /* TODO(https://go.dev/issue/71631): Re-enable this test case. name: jsontest.Name("Duration/MapKey"), inBuf: `{"1s":""}`, inVal: new(map[time.Duration]string), want: addr(map[time.Duration]string{time.Second: ""}), - }, { + }, { */ name: jsontest.Name("Duration/MapKey/Legacy"), opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1}, inBuf: `{"1000000000":""}`, inVal: new(map[time.Duration]string), want: addr(map[time.Duration]string{time.Second: ""}), }, { + /* TODO(https://go.dev/issue/71631): Re-enable this test case. 
name: jsontest.Name("Duration/IgnoreInvalidFormat"), opts: []Options{invalidFormatOption}, inBuf: `"1s"`, inVal: addr(time.Duration(0)), want: addr(time.Second), - }, { + }, { */ name: jsontest.Name("Time/Zero"), inBuf: `{"T1":"0001-01-01T00:00:00Z","T2":"01 Jan 01 00:00 UTC","T3":"0001-01-01","T4":"0001-01-01T00:00:00Z","T5":"0001-01-01T00:00:00Z"}`, inVal: new(struct { diff --git a/src/encoding/json/v2/arshal_time.go b/src/encoding/json/v2/arshal_time.go index e40a04f12a0ed43ef158aaaaf29f36adcfda5ead..fefa50ff5f0d7511c9a47ae5ed035d21bb97494a 100644 --- a/src/encoding/json/v2/arshal_time.go +++ b/src/encoding/json/v2/arshal_time.go @@ -52,6 +52,9 @@ return newInvalidFormatError(enc, t, mo) } } else if mo.Flags.Get(jsonflags.FormatTimeWithLegacySemantics) { return marshalNano(enc, va, mo) + } else { + // TODO(https://go.dev/issue/71631): Decide on default duration representation. + return newMarshalErrorBefore(enc, t, errors.New("no default representation (see https://go.dev/issue/71631); specify an explicit format")) } // TODO(https://go.dev/issue/62121): Use reflect.Value.AssertTo. @@ -75,6 +78,9 @@ return newInvalidFormatError(dec, t, uo) } } else if uo.Flags.Get(jsonflags.FormatTimeWithLegacySemantics) { return unmarshalNano(dec, va, uo) + } else { + // TODO(https://go.dev/issue/71631): Decide on default duration representation. + return newUnmarshalErrorBeforeWithSkipping(dec, uo, t, errors.New("no default representation (see https://go.dev/issue/71631); specify an explicit format")) } stringify := !u.isNumeric() || xd.Tokens.Last.NeedObjectName() || uo.Flags.Get(jsonflags.StringifyNumbers) @@ -200,6 +206,7 @@ // base records the representation where: // - 0 uses time.Duration.String // - 1e0, 1e3, 1e6, or 1e9 use a decimal encoding of the duration as // nanoseconds, microseconds, milliseconds, or seconds. 
+ // - 8601 uses ISO 8601 base uint64 } @@ -215,6 +222,8 @@ case "micro": a.base = 1e3 case "nano": a.base = 1e0 + case "iso8601": + a.base = 8601 default: return false } @@ -222,13 +231,15 @@ return true } func (a *durationArshaler) isNumeric() bool { - return a.base != 0 && a.base != 60 + return a.base != 0 && a.base != 8601 } func (a *durationArshaler) appendMarshal(b []byte) ([]byte, error) { switch a.base { case 0: return append(b, a.td.String()...), nil + case 8601: + return appendDurationISO8601(b, a.td), nil default: return appendDurationBase10(b, a.td, a.base), nil } @@ -238,6 +249,8 @@ func (a *durationArshaler) unmarshal(b []byte) (err error) { switch a.base { case 0: a.td, err = time.ParseDuration(string(b)) + case 8601: + a.td, err = parseDurationISO8601(b) default: a.td, err = parseDurationBase10(b, a.base) } @@ -412,7 +425,7 @@ // parseDurationBase10 parses d from a decimal fractional number, // where pow10 is a power-of-10 used to scale up the number. func parseDurationBase10(b []byte, pow10 uint64) (time.Duration, error) { - suffix, neg := consumeSign(b) // consume sign + suffix, neg := consumeSign(b, false) // consume sign wholeBytes, fracBytes := bytesCutByte(suffix, '.', true) // consume whole and frac fields whole, okWhole := jsonwire.ParseUint(wholeBytes) // parse whole field; may overflow frac, okFrac := parseFracBase10(fracBytes, pow10) // parse frac field @@ -428,6 +441,166 @@ return d, nil } } +// appendDurationISO8601 appends an ISO 8601 duration with a restricted grammar, +// where leading and trailing zeroes and zero-value designators are omitted. +// It only uses hour, minute, and second designators since ISO 8601 defines +// those as being "accurate", while year, month, week, and day are "nominal". +func appendDurationISO8601(b []byte, d time.Duration) []byte { + if d == 0 { + return append(b, "PT0S"...) + } + b, n := mayAppendDurationSign(b, d) + b = append(b, "PT"...) 
+ n, nsec := bits.Div64(0, n, 1e9) // compute nsec field + n, sec := bits.Div64(0, n, 60) // compute sec field + hour, min := bits.Div64(0, n, 60) // compute hour and min fields + if hour > 0 { + b = append(strconv.AppendUint(b, hour, 10), 'H') + } + if min > 0 { + b = append(strconv.AppendUint(b, min, 10), 'M') + } + if sec > 0 || nsec > 0 { + b = append(appendFracBase10(strconv.AppendUint(b, sec, 10), nsec, 1e9), 'S') + } + return b +} + +// daysPerYear is the exact average number of days in a year according to +// the Gregorian calendar, which has an extra day each year that is +// a multiple of 4, unless it is evenly divisible by 100 but not by 400. +// This does not take into account leap seconds, which are not deterministic. +const daysPerYear = 365.2425 + +var errInaccurateDateUnits = errors.New("inaccurate year, month, week, or day units") + +// parseDurationISO8601 parses a duration according to ISO 8601-1:2019, +// section 5.5.2.2 and 5.5.2.3 with the following restrictions or extensions: +// +// - A leading minus sign is permitted for negative duration according +// to ISO 8601-2:2019, section 4.4.1.9. We do not permit negative values +// for each "time scale component", which is permitted by section 4.4.1.1, +// but rarely supported by parsers. +// +// - A leading plus sign is permitted (and ignored). +// This is not required by ISO 8601, but not forbidden either. +// There is some precedent for this as it is supported by the principle of +// duration arithmetic as specified in ISO 8601-2:2019, section 14.1. +// Of note, the JavaScript grammar for ISO 8601 permits a leading plus sign. +// +// - A fractional value is only permitted for accurate units +// (i.e., hour, minute, and seconds) in the last time component, +// which is permissible by ISO 8601-1:2019, section 5.5.2.3. 
+// +// - Both periods ('.') and commas (',') are supported as the separator +// between the integer part and fraction part of a number, +// as specified in ISO 8601-1:2019, section 3.2.6. +// While ISO 8601 recommends comma as the default separator, +// most formatters use a period. +// +// - Leading zeros are ignored. This is not required by ISO 8601, +// but also not forbidden by the standard. Many parsers support this. +// +// - Lowercase designators are supported. This is not required by ISO 8601, +// but also not forbidden by the standard. Many parsers support this. +// +// If the nominal units of year, month, week, or day are present, +// this produces a best-effort value and also reports [errInaccurateDateUnits]. +// +// The accepted grammar is identical to JavaScript's Duration: +// +// https://tc39.es/proposal-temporal/#prod-Duration +// +// We follow JavaScript's grammar as JSON itself is derived from JavaScript. +// The Temporal.Duration.toJSON method is guaranteed to produce an output +// that can be parsed by this function so long as arithmetic in JavaScript +// does not use a largestUnit value higher than "hours" (which is the default). +// Even if it does, this will do a best-effort parsing with inaccurate units, +// but report [errInaccurateDateUnits]. +func parseDurationISO8601(b []byte) (time.Duration, error) { + var invalid, overflow, inaccurate, sawFrac bool + var sumNanos, n, co uint64 + + // cutBytes is like [bytes.Cut], but uses either c0 or c1 as the separator. + cutBytes := func(b []byte, c0, c1 byte) (prefix, suffix []byte, ok bool) { + for i, c := range b { + if c == c0 || c == c1 { + return b[:i], b[i+1:], true + } + } + return b, nil, false + } + + // mayParseUnit attempts to parse another date or time number + // identified by the desHi and desLo unit characters. + // If the part is absent for the current unit, it returns b as is. 
+ mayParseUnit := func(b []byte, desHi, desLo byte, unit time.Duration) []byte { + number, suffix, ok := cutBytes(b, desHi, desLo) + if !ok || sawFrac { + return b // designator is not present or already saw fraction, which can only be in the last component + } + + // Parse the number. + // A fraction is allowed for the accurate units in the last part. + whole, frac, ok := cutBytes(number, '.', ',') + if ok { + sawFrac = true + invalid = invalid || len(frac) == len("") || unit > time.Hour + if unit == time.Second { + n, ok = parsePaddedBase10(frac, uint64(time.Second)) + invalid = invalid || !ok + } else { + f, err := strconv.ParseFloat("0."+string(frac), 64) + invalid = invalid || err != nil || len(bytes.Trim(frac[len("."):], "0123456789")) > 0 + n = uint64(math.Round(f * float64(unit))) // never overflows since f is within [0..1] + } + sumNanos, co = bits.Add64(sumNanos, n, 0) // overflow if co > 0 + overflow = overflow || co > 0 + } + for len(whole) > 1 && whole[0] == '0' { + whole = whole[len("0"):] // trim leading zeros + } + n, ok := jsonwire.ParseUint(whole) // overflow if !ok && MaxUint64 + hi, lo := bits.Mul64(n, uint64(unit)) // overflow if hi > 0 + sumNanos, co = bits.Add64(sumNanos, lo, 0) // overflow if co > 0 + invalid = invalid || (!ok && n != math.MaxUint64) + overflow = overflow || (!ok && n == math.MaxUint64) || hi > 0 || co > 0 + inaccurate = inaccurate || unit > time.Hour + return suffix + } + + suffix, neg := consumeSign(b, true) + prefix, suffix, okP := cutBytes(suffix, 'P', 'p') + durDate, durTime, okT := cutBytes(suffix, 'T', 't') + invalid = invalid || len(prefix) > 0 || !okP || (okT && len(durTime) == 0) || len(durDate)+len(durTime) == 0 + if len(durDate) > 0 { // nominal portion of the duration + durDate = mayParseUnit(durDate, 'Y', 'y', time.Duration(daysPerYear*24*60*60*1e9)) + durDate = mayParseUnit(durDate, 'M', 'm', time.Duration(daysPerYear/12*24*60*60*1e9)) + durDate = mayParseUnit(durDate, 'W', 'w', time.Duration(7*24*60*60*1e9)) + 
durDate = mayParseUnit(durDate, 'D', 'd', time.Duration(24*60*60*1e9)) + invalid = invalid || len(durDate) > 0 // unknown elements + } + if len(durTime) > 0 { // accurate portion of the duration + durTime = mayParseUnit(durTime, 'H', 'h', time.Duration(60*60*1e9)) + durTime = mayParseUnit(durTime, 'M', 'm', time.Duration(60*1e9)) + durTime = mayParseUnit(durTime, 'S', 's', time.Duration(1e9)) + invalid = invalid || len(durTime) > 0 // unknown elements + } + d := mayApplyDurationSign(sumNanos, neg) + overflow = overflow || (neg != (d < 0) && d != 0) // overflows signed duration + + switch { + case invalid: + return 0, fmt.Errorf("invalid ISO 8601 duration %q: %w", b, strconv.ErrSyntax) + case overflow: + return 0, fmt.Errorf("invalid ISO 8601 duration %q: %w", b, strconv.ErrRange) + case inaccurate: + return d, fmt.Errorf("invalid ISO 8601 duration %q: %w", b, errInaccurateDateUnits) + default: + return d, nil + } +} + // mayAppendDurationSign appends a negative sign if n is negative. func mayAppendDurationSign(b []byte, d time.Duration) ([]byte, uint64) { if d < 0 { @@ -471,7 +644,7 @@ // parseTimeUnix parses t formatted as a decimal fractional number, // where pow10 is a power-of-10 used to scale down the number. func parseTimeUnix(b []byte, pow10 uint64) (time.Time, error) { - suffix, neg := consumeSign(b) // consume sign + suffix, neg := consumeSign(b, false) // consume sign wholeBytes, fracBytes := bytesCutByte(suffix, '.', true) // consume whole and frac fields whole, okWhole := jsonwire.ParseUint(wholeBytes) // parse whole field; may overflow frac, okFrac := parseFracBase10(fracBytes, 1e9/pow10) // parse frac field @@ -570,10 +743,14 @@ } return n, true } -// consumeSign consumes an optional leading negative sign. -func consumeSign(b []byte) ([]byte, bool) { - if len(b) > 0 && b[0] == '-' { - return b[len("-"):], true +// consumeSign consumes an optional leading negative or positive sign. 
+func consumeSign(b []byte, allowPlus bool) ([]byte, bool) { + if len(b) > 0 { + if b[0] == '-' { + return b[len("-"):], true + } else if b[0] == '+' && allowPlus { + return b[len("+"):], false + } } return b, false } diff --git a/src/encoding/json/v2/arshal_time_test.go b/src/encoding/json/v2/arshal_time_test.go index faa09de5098505bcf1e298a2446da015e2262383..6c08e12494860ba4e731e315643a95c6d6e18c58 100644 --- a/src/encoding/json/v2/arshal_time_test.go +++ b/src/encoding/json/v2/arshal_time_test.go @@ -7,8 +7,10 @@ package json import ( + "errors" "fmt" "math" + "strconv" "testing" "time" @@ -28,63 +30,67 @@ base10Sec string base10Milli string base10Micro string base10Nano string + iso8601 string }{ - {math.MaxInt64, "9223372036.854775807", "9223372036854.775807", "9223372036854775.807", "9223372036854775807"}, - {1e12 + 1e12, "2000", "2000000", "2000000000", "2000000000000"}, - {1e12 + 1e11, "1100", "1100000", "1100000000", "1100000000000"}, - {1e12 + 1e10, "1010", "1010000", "1010000000", "1010000000000"}, - {1e12 + 1e9, "1001", "1001000", "1001000000", "1001000000000"}, - {1e12 + 1e8, "1000.1", "1000100", "1000100000", "1000100000000"}, - {1e12 + 1e7, "1000.01", "1000010", "1000010000", "1000010000000"}, - {1e12 + 1e6, "1000.001", "1000001", "1000001000", "1000001000000"}, - {1e12 + 1e5, "1000.0001", "1000000.1", "1000000100", "1000000100000"}, - {1e12 + 1e4, "1000.00001", "1000000.01", "1000000010", "1000000010000"}, - {1e12 + 1e3, "1000.000001", "1000000.001", "1000000001", "1000000001000"}, - {1e12 + 1e2, "1000.0000001", "1000000.0001", "1000000000.1", "1000000000100"}, - {1e12 + 1e1, "1000.00000001", "1000000.00001", "1000000000.01", "1000000000010"}, - {1e12 + 1e0, "1000.000000001", "1000000.000001", "1000000000.001", "1000000000001"}, - {+(1e9 + 1), "1.000000001", "1000.000001", "1000000.001", "1000000001"}, - {+(1e9), "1", "1000", "1000000", "1000000000"}, - {+(1e9 - 1), "0.999999999", "999.999999", "999999.999", "999999999"}, - {+100000000, "0.1", 
"100", "100000", "100000000"}, - {+120000000, "0.12", "120", "120000", "120000000"}, - {+123000000, "0.123", "123", "123000", "123000000"}, - {+123400000, "0.1234", "123.4", "123400", "123400000"}, - {+123450000, "0.12345", "123.45", "123450", "123450000"}, - {+123456000, "0.123456", "123.456", "123456", "123456000"}, - {+123456700, "0.1234567", "123.4567", "123456.7", "123456700"}, - {+123456780, "0.12345678", "123.45678", "123456.78", "123456780"}, - {+123456789, "0.123456789", "123.456789", "123456.789", "123456789"}, - {+12345678, "0.012345678", "12.345678", "12345.678", "12345678"}, - {+1234567, "0.001234567", "1.234567", "1234.567", "1234567"}, - {+123456, "0.000123456", "0.123456", "123.456", "123456"}, - {+12345, "0.000012345", "0.012345", "12.345", "12345"}, - {+1234, "0.000001234", "0.001234", "1.234", "1234"}, - {+123, "0.000000123", "0.000123", "0.123", "123"}, - {+12, "0.000000012", "0.000012", "0.012", "12"}, - {+1, "0.000000001", "0.000001", "0.001", "1"}, - {0, "0", "0", "0", "0"}, - {-1, "-0.000000001", "-0.000001", "-0.001", "-1"}, - {-12, "-0.000000012", "-0.000012", "-0.012", "-12"}, - {-123, "-0.000000123", "-0.000123", "-0.123", "-123"}, - {-1234, "-0.000001234", "-0.001234", "-1.234", "-1234"}, - {-12345, "-0.000012345", "-0.012345", "-12.345", "-12345"}, - {-123456, "-0.000123456", "-0.123456", "-123.456", "-123456"}, - {-1234567, "-0.001234567", "-1.234567", "-1234.567", "-1234567"}, - {-12345678, "-0.012345678", "-12.345678", "-12345.678", "-12345678"}, - {-123456789, "-0.123456789", "-123.456789", "-123456.789", "-123456789"}, - {-123456780, "-0.12345678", "-123.45678", "-123456.78", "-123456780"}, - {-123456700, "-0.1234567", "-123.4567", "-123456.7", "-123456700"}, - {-123456000, "-0.123456", "-123.456", "-123456", "-123456000"}, - {-123450000, "-0.12345", "-123.45", "-123450", "-123450000"}, - {-123400000, "-0.1234", "-123.4", "-123400", "-123400000"}, - {-123000000, "-0.123", "-123", "-123000", "-123000000"}, - {-120000000, "-0.12", 
"-120", "-120000", "-120000000"}, - {-100000000, "-0.1", "-100", "-100000", "-100000000"}, - {-(1e9 - 1), "-0.999999999", "-999.999999", "-999999.999", "-999999999"}, - {-(1e9), "-1", "-1000", "-1000000", "-1000000000"}, - {-(1e9 + 1), "-1.000000001", "-1000.000001", "-1000000.001", "-1000000001"}, - {math.MinInt64, "-9223372036.854775808", "-9223372036854.775808", "-9223372036854775.808", "-9223372036854775808"}, + {math.MaxInt64, "9223372036.854775807", "9223372036854.775807", "9223372036854775.807", "9223372036854775807", "PT2562047H47M16.854775807S"}, + {123*time.Hour + 4*time.Minute + 56*time.Second, "443096", "443096000", "443096000000", "443096000000000", "PT123H4M56S"}, + {time.Hour, "3600", "3600000", "3600000000", "3600000000000", "PT1H"}, + {time.Minute, "60", "60000", "60000000", "60000000000", "PT1M"}, + {1e12 + 1e12, "2000", "2000000", "2000000000", "2000000000000", "PT33M20S"}, + {1e12 + 1e11, "1100", "1100000", "1100000000", "1100000000000", "PT18M20S"}, + {1e12 + 1e10, "1010", "1010000", "1010000000", "1010000000000", "PT16M50S"}, + {1e12 + 1e9, "1001", "1001000", "1001000000", "1001000000000", "PT16M41S"}, + {1e12 + 1e8, "1000.1", "1000100", "1000100000", "1000100000000", "PT16M40.1S"}, + {1e12 + 1e7, "1000.01", "1000010", "1000010000", "1000010000000", "PT16M40.01S"}, + {1e12 + 1e6, "1000.001", "1000001", "1000001000", "1000001000000", "PT16M40.001S"}, + {1e12 + 1e5, "1000.0001", "1000000.1", "1000000100", "1000000100000", "PT16M40.0001S"}, + {1e12 + 1e4, "1000.00001", "1000000.01", "1000000010", "1000000010000", "PT16M40.00001S"}, + {1e12 + 1e3, "1000.000001", "1000000.001", "1000000001", "1000000001000", "PT16M40.000001S"}, + {1e12 + 1e2, "1000.0000001", "1000000.0001", "1000000000.1", "1000000000100", "PT16M40.0000001S"}, + {1e12 + 1e1, "1000.00000001", "1000000.00001", "1000000000.01", "1000000000010", "PT16M40.00000001S"}, + {1e12 + 1e0, "1000.000000001", "1000000.000001", "1000000000.001", "1000000000001", "PT16M40.000000001S"}, + {+(1e9 + 
1), "1.000000001", "1000.000001", "1000000.001", "1000000001", "PT1.000000001S"}, + {+(1e9), "1", "1000", "1000000", "1000000000", "PT1S"}, + {+(1e9 - 1), "0.999999999", "999.999999", "999999.999", "999999999", "PT0.999999999S"}, + {+100000000, "0.1", "100", "100000", "100000000", "PT0.1S"}, + {+120000000, "0.12", "120", "120000", "120000000", "PT0.12S"}, + {+123000000, "0.123", "123", "123000", "123000000", "PT0.123S"}, + {+123400000, "0.1234", "123.4", "123400", "123400000", "PT0.1234S"}, + {+123450000, "0.12345", "123.45", "123450", "123450000", "PT0.12345S"}, + {+123456000, "0.123456", "123.456", "123456", "123456000", "PT0.123456S"}, + {+123456700, "0.1234567", "123.4567", "123456.7", "123456700", "PT0.1234567S"}, + {+123456780, "0.12345678", "123.45678", "123456.78", "123456780", "PT0.12345678S"}, + {+123456789, "0.123456789", "123.456789", "123456.789", "123456789", "PT0.123456789S"}, + {+12345678, "0.012345678", "12.345678", "12345.678", "12345678", "PT0.012345678S"}, + {+1234567, "0.001234567", "1.234567", "1234.567", "1234567", "PT0.001234567S"}, + {+123456, "0.000123456", "0.123456", "123.456", "123456", "PT0.000123456S"}, + {+12345, "0.000012345", "0.012345", "12.345", "12345", "PT0.000012345S"}, + {+1234, "0.000001234", "0.001234", "1.234", "1234", "PT0.000001234S"}, + {+123, "0.000000123", "0.000123", "0.123", "123", "PT0.000000123S"}, + {+12, "0.000000012", "0.000012", "0.012", "12", "PT0.000000012S"}, + {+1, "0.000000001", "0.000001", "0.001", "1", "PT0.000000001S"}, + {0, "0", "0", "0", "0", "PT0S"}, + {-1, "-0.000000001", "-0.000001", "-0.001", "-1", "-PT0.000000001S"}, + {-12, "-0.000000012", "-0.000012", "-0.012", "-12", "-PT0.000000012S"}, + {-123, "-0.000000123", "-0.000123", "-0.123", "-123", "-PT0.000000123S"}, + {-1234, "-0.000001234", "-0.001234", "-1.234", "-1234", "-PT0.000001234S"}, + {-12345, "-0.000012345", "-0.012345", "-12.345", "-12345", "-PT0.000012345S"}, + {-123456, "-0.000123456", "-0.123456", "-123.456", "-123456", 
"-PT0.000123456S"}, + {-1234567, "-0.001234567", "-1.234567", "-1234.567", "-1234567", "-PT0.001234567S"}, + {-12345678, "-0.012345678", "-12.345678", "-12345.678", "-12345678", "-PT0.012345678S"}, + {-123456789, "-0.123456789", "-123.456789", "-123456.789", "-123456789", "-PT0.123456789S"}, + {-123456780, "-0.12345678", "-123.45678", "-123456.78", "-123456780", "-PT0.12345678S"}, + {-123456700, "-0.1234567", "-123.4567", "-123456.7", "-123456700", "-PT0.1234567S"}, + {-123456000, "-0.123456", "-123.456", "-123456", "-123456000", "-PT0.123456S"}, + {-123450000, "-0.12345", "-123.45", "-123450", "-123450000", "-PT0.12345S"}, + {-123400000, "-0.1234", "-123.4", "-123400", "-123400000", "-PT0.1234S"}, + {-123000000, "-0.123", "-123", "-123000", "-123000000", "-PT0.123S"}, + {-120000000, "-0.12", "-120", "-120000", "-120000000", "-PT0.12S"}, + {-100000000, "-0.1", "-100", "-100000", "-100000000", "-PT0.1S"}, + {-(1e9 - 1), "-0.999999999", "-999.999999", "-999999.999", "-999999999", "-PT0.999999999S"}, + {-(1e9), "-1", "-1000", "-1000000", "-1000000000", "-PT1S"}, + {-(1e9 + 1), "-1.000000001", "-1000.000001", "-1000000.001", "-1000000001", "-PT1.000000001S"}, + {math.MinInt64, "-9223372036.854775808", "-9223372036854.775808", "-9223372036854775.808", "-9223372036854775808", "-PT2562047H47M16.854775808S"}, } func TestFormatDuration(t *testing.T) { @@ -107,6 +113,7 @@ check(tt.td, tt.base10Sec, 1e9) check(tt.td, tt.base10Milli, 1e6) check(tt.td, tt.base10Micro, 1e3) check(tt.td, tt.base10Nano, 1e0) + check(tt.td, tt.iso8601, 8601) } } @@ -114,31 +121,108 @@ var parseDurationTestdata = []struct { in string base uint64 want time.Duration - wantErr bool + wantErr error }{ - {"0", 1e0, 0, false}, - {"0.", 1e0, 0, true}, - {"0.0", 1e0, 0, false}, - {"0.00", 1e0, 0, false}, - {"00.0", 1e0, 0, true}, - {"+0", 1e0, 0, true}, - {"1e0", 1e0, 0, true}, - {"1.000000000x", 1e9, 0, true}, - {"1.000000x", 1e6, 0, true}, - {"1.000x", 1e3, 0, true}, - {"1.x", 1e0, 0, true}, - 
{"1.0000000009", 1e9, +time.Second, false}, - {"1.0000009", 1e6, +time.Millisecond, false}, - {"1.0009", 1e3, +time.Microsecond, false}, - {"1.9", 1e0, +time.Nanosecond, false}, - {"-9223372036854775809", 1e0, 0, true}, - {"9223372036854775.808", 1e3, 0, true}, - {"-9223372036854.775809", 1e6, 0, true}, - {"9223372036.854775808", 1e9, 0, true}, - {"-1.9", 1e0, -time.Nanosecond, false}, - {"-1.0009", 1e3, -time.Microsecond, false}, - {"-1.0000009", 1e6, -time.Millisecond, false}, - {"-1.0000000009", 1e9, -time.Second, false}, + {"0", 1e0, 0, nil}, + {"0.", 1e0, 0, strconv.ErrSyntax}, + {"0.0", 1e0, 0, nil}, + {"0.00", 1e0, 0, nil}, + {"00.0", 1e0, 0, strconv.ErrSyntax}, + {"+0", 1e0, 0, strconv.ErrSyntax}, + {"1e0", 1e0, 0, strconv.ErrSyntax}, + {"1.000000000x", 1e9, 0, strconv.ErrSyntax}, + {"1.000000x", 1e6, 0, strconv.ErrSyntax}, + {"1.000x", 1e3, 0, strconv.ErrSyntax}, + {"1.x", 1e0, 0, strconv.ErrSyntax}, + {"1.0000000009", 1e9, +time.Second, nil}, + {"1.0000009", 1e6, +time.Millisecond, nil}, + {"1.0009", 1e3, +time.Microsecond, nil}, + {"1.9", 1e0, +time.Nanosecond, nil}, + {"-9223372036854775809", 1e0, 0, strconv.ErrRange}, + {"9223372036854775.808", 1e3, 0, strconv.ErrRange}, + {"-9223372036854.775809", 1e6, 0, strconv.ErrRange}, + {"9223372036.854775808", 1e9, 0, strconv.ErrRange}, + {"-1.9", 1e0, -time.Nanosecond, nil}, + {"-1.0009", 1e3, -time.Microsecond, nil}, + {"-1.0000009", 1e6, -time.Millisecond, nil}, + {"-1.0000000009", 1e9, -time.Second, nil}, + {"", 8601, 0, strconv.ErrSyntax}, + {"P", 8601, 0, strconv.ErrSyntax}, + {"PT", 8601, 0, strconv.ErrSyntax}, + {"PT0", 8601, 0, strconv.ErrSyntax}, + {"DT0S", 8601, 0, strconv.ErrSyntax}, + {"PT0S", 8601, 0, nil}, + {" PT0S", 8601, 0, strconv.ErrSyntax}, + {"PT0S ", 8601, 0, strconv.ErrSyntax}, + {"+PT0S", 8601, 0, nil}, + {"PT0.M", 8601, 0, strconv.ErrSyntax}, + {"PT0.S", 8601, 0, strconv.ErrSyntax}, + {"PT0.0S", 8601, 0, nil}, + {"PT0.0_0H", 8601, 0, strconv.ErrSyntax}, + {"PT0.0_0M", 8601, 0, 
strconv.ErrSyntax}, + {"PT0.0_0S", 8601, 0, strconv.ErrSyntax}, + {"PT.0S", 8601, 0, strconv.ErrSyntax}, + {"PT00.0S", 8601, 0, nil}, + {"PT0S", 8601, 0, nil}, + {"PT1,5S", 8601, time.Second + 500*time.Millisecond, nil}, + {"PT1H", 8601, time.Hour, nil}, + {"PT1H0S", 8601, time.Hour, nil}, + {"PT0S", 8601, 0, nil}, + {"PT00S", 8601, 0, nil}, + {"PT000S", 8601, 0, nil}, + {"PTS", 8601, 0, strconv.ErrSyntax}, + {"PT1M", 8601, time.Minute, nil}, + {"PT01M", 8601, time.Minute, nil}, + {"PT001M", 8601, time.Minute, nil}, + {"PT1H59S", 8601, time.Hour + 59*time.Second, nil}, + {"PT123H4M56.789S", 8601, 123*time.Hour + 4*time.Minute + 56*time.Second + 789*time.Millisecond, nil}, + {"-PT123H4M56.789S", 8601, -123*time.Hour - 4*time.Minute - 56*time.Second - 789*time.Millisecond, nil}, + {"PT0H0S", 8601, 0, nil}, + {"PT0H", 8601, 0, nil}, + {"PT0M", 8601, 0, nil}, + {"-PT0S", 8601, 0, nil}, + {"PT1M0S", 8601, time.Minute, nil}, + {"PT0H1M0S", 8601, time.Minute, nil}, + {"PT01H02M03S", 8601, 1*time.Hour + 2*time.Minute + 3*time.Second, nil}, + {"PT0,123S", 8601, 123 * time.Millisecond, nil}, + {"PT1.S", 8601, 0, strconv.ErrSyntax}, + {"PT1.000S", 8601, time.Second, nil}, + {"PT0.025H", 8601, time.Minute + 30*time.Second, nil}, + {"PT0.025H0M", 8601, 0, strconv.ErrSyntax}, + {"PT1.5M", 8601, time.Minute + 30*time.Second, nil}, + {"PT1.5M0S", 8601, 0, strconv.ErrSyntax}, + {"PT60M", 8601, time.Hour, nil}, + {"PT3600S", 8601, time.Hour, nil}, + {"PT1H2M3.0S", 8601, 1*time.Hour + 2*time.Minute + 3*time.Second, nil}, + {"pt1h2m3,0s", 8601, 1*time.Hour + 2*time.Minute + 3*time.Second, nil}, + {"PT-1H-2M-3S", 8601, 0, strconv.ErrSyntax}, + {"P1Y", 8601, time.Duration(daysPerYear * 24 * 60 * 60 * 1e9), errInaccurateDateUnits}, + {"P1.0Y", 8601, 0, strconv.ErrSyntax}, + {"P1M", 8601, time.Duration(daysPerYear / 12 * 24 * 60 * 60 * 1e9), errInaccurateDateUnits}, + {"P1.0M", 8601, 0, strconv.ErrSyntax}, + {"P1W", 8601, 7 * 24 * time.Hour, errInaccurateDateUnits}, + {"P1.0W", 8601, 0, 
strconv.ErrSyntax}, + {"P1D", 8601, 24 * time.Hour, errInaccurateDateUnits}, + {"P1.0D", 8601, 0, strconv.ErrSyntax}, + {"P1W1S", 8601, 0, strconv.ErrSyntax}, + {"-P1Y2M3W4DT5H6M7.8S", 8601, -(time.Duration(14*daysPerYear/12*24*60*60*1e9) + time.Duration((3*7+4)*24*60*60*1e9) + 5*time.Hour + 6*time.Minute + 7*time.Second + 800*time.Millisecond), errInaccurateDateUnits}, + {"-p1y2m3w4dt5h6m7.8s", 8601, -(time.Duration(14*daysPerYear/12*24*60*60*1e9) + time.Duration((3*7+4)*24*60*60*1e9) + 5*time.Hour + 6*time.Minute + 7*time.Second + 800*time.Millisecond), errInaccurateDateUnits}, + {"P0Y0M0DT1H2M3S", 8601, 1*time.Hour + 2*time.Minute + 3*time.Second, errInaccurateDateUnits}, + {"PT0.0000000001S", 8601, 0, nil}, + {"PT0.0000000005S", 8601, 0, nil}, + {"PT0.000000000500000000S", 8601, 0, nil}, + {"PT0.000000000499999999S", 8601, 0, nil}, + {"PT2562047H47M16.854775808S", 8601, 0, strconv.ErrRange}, + {"-PT2562047H47M16.854775809S", 8601, 0, strconv.ErrRange}, + {"PT9223372036.854775807S", 8601, math.MaxInt64, nil}, + {"PT9223372036.854775808S", 8601, 0, strconv.ErrRange}, + {"-PT9223372036.854775808S", 8601, math.MinInt64, nil}, + {"-PT9223372036.854775809S", 8601, 0, strconv.ErrRange}, + {"PT18446744073709551616S", 8601, 0, strconv.ErrRange}, + {"PT5124096H", 8601, 0, strconv.ErrRange}, + {"PT2562047.7880152155019444H", 8601, math.MaxInt64, nil}, + {"PT2562047.7880152155022222H", 8601, 0, strconv.ErrRange}, + {"PT5124094H94M33.709551616S", 8601, 0, strconv.ErrRange}, } func TestParseDuration(t *testing.T) { @@ -147,10 +231,8 @@ a := durationArshaler{base: tt.base} switch err := a.unmarshal([]byte(tt.in)); { case a.td != tt.want: t.Errorf("parseDuration(%q, %s) = %v, want %v", tt.in, baseLabel(tt.base), a.td, tt.want) - case (err == nil) && tt.wantErr: - t.Errorf("parseDuration(%q, %s) error is nil, want non-nil", tt.in, baseLabel(tt.base)) - case (err != nil) && !tt.wantErr: - t.Errorf("parseDuration(%q, %s) error is non-nil, want nil", tt.in, baseLabel(tt.base)) + 
case !errors.Is(err, tt.wantErr): + t.Errorf("parseDuration(%q, %s) error = %v, want %v", tt.in, baseLabel(tt.base), err, tt.wantErr) } } } @@ -161,7 +243,7 @@ f.Add(int64(tt.td)) } f.Fuzz(func(t *testing.T, want int64) { var buf []byte - for _, base := range [...]uint64{1e0, 1e3, 1e6, 1e9} { + for _, base := range [...]uint64{1e0, 1e3, 1e6, 1e9, 8601} { a := durationArshaler{td: time.Duration(want), base: base} buf, _ = a.appendMarshal(buf[:0]) switch err := a.unmarshal(buf); { @@ -179,9 +261,11 @@ for _, tt := range parseDurationTestdata { f.Add([]byte(tt.in)) } f.Fuzz(func(t *testing.T, in []byte) { - for _, base := range [...]uint64{1e0, 1e3, 1e6, 1e9, 60} { + for _, base := range [...]uint64{1e0, 1e3, 1e6, 1e9, 8601} { a := durationArshaler{base: base} - if err := a.unmarshal(in); err == nil && base != 60 { + switch err := a.unmarshal(in); { + case err != nil: // nothing else to check + case base != 8601: if n, err := jsonwire.ConsumeNumber(in); err != nil || n != len(in) { t.Fatalf("parseDuration(%q) error is nil for invalid JSON number", in) } @@ -239,26 +323,26 @@ var parseTimeTestdata = []struct { in string base uint64 want time.Time - wantErr bool + wantErr error }{ - {"0", 1e0, time.Unix(0, 0).UTC(), false}, - {"0.", 1e0, time.Time{}, true}, - {"0.0", 1e0, time.Unix(0, 0).UTC(), false}, - {"0.00", 1e0, time.Unix(0, 0).UTC(), false}, - {"00.0", 1e0, time.Time{}, true}, - {"+0", 1e0, time.Time{}, true}, - {"1e0", 1e0, time.Time{}, true}, - {"1234567890123456789012345678901234567890", 1e0, time.Time{}, true}, - {"9223372036854775808000.000000", 1e3, time.Time{}, true}, - {"9223372036854775807999999.9999", 1e6, time.Unix(math.MaxInt64, 1e9-1).UTC(), false}, - {"9223372036854775807999999999.9", 1e9, time.Unix(math.MaxInt64, 1e9-1).UTC(), false}, - {"9223372036854775807.999999999x", 1e0, time.Time{}, true}, - {"9223372036854775807000000000", 1e9, time.Unix(math.MaxInt64, 0).UTC(), false}, - {"-9223372036854775808", 1e0, time.Unix(math.MinInt64, 0).UTC(), 
false}, - {"-9223372036854775808000.000001", 1e3, time.Time{}, true}, - {"-9223372036854775808000000.0001", 1e6, time.Unix(math.MinInt64, 0).UTC(), false}, - {"-9223372036854775808000000000.x", 1e9, time.Time{}, true}, - {"-1234567890123456789012345678901234567890", 1e9, time.Time{}, true}, + {"0", 1e0, time.Unix(0, 0).UTC(), nil}, + {"0.", 1e0, time.Time{}, strconv.ErrSyntax}, + {"0.0", 1e0, time.Unix(0, 0).UTC(), nil}, + {"0.00", 1e0, time.Unix(0, 0).UTC(), nil}, + {"00.0", 1e0, time.Time{}, strconv.ErrSyntax}, + {"+0", 1e0, time.Time{}, strconv.ErrSyntax}, + {"1e0", 1e0, time.Time{}, strconv.ErrSyntax}, + {"1234567890123456789012345678901234567890", 1e0, time.Time{}, strconv.ErrRange}, + {"9223372036854775808000.000000", 1e3, time.Time{}, strconv.ErrRange}, + {"9223372036854775807999999.9999", 1e6, time.Unix(math.MaxInt64, 1e9-1).UTC(), nil}, + {"9223372036854775807999999999.9", 1e9, time.Unix(math.MaxInt64, 1e9-1).UTC(), nil}, + {"9223372036854775807.999999999x", 1e0, time.Time{}, strconv.ErrSyntax}, + {"9223372036854775807000000000", 1e9, time.Unix(math.MaxInt64, 0).UTC(), nil}, + {"-9223372036854775808", 1e0, time.Unix(math.MinInt64, 0).UTC(), nil}, + {"-9223372036854775808000.000001", 1e3, time.Time{}, strconv.ErrRange}, + {"-9223372036854775808000000.0001", 1e6, time.Unix(math.MinInt64, 0).UTC(), nil}, + {"-9223372036854775808000000000.x", 1e9, time.Time{}, strconv.ErrSyntax}, + {"-1234567890123456789012345678901234567890", 1e9, time.Time{}, strconv.ErrRange}, } func TestParseTime(t *testing.T) { @@ -267,10 +351,8 @@ a := timeArshaler{base: tt.base} switch err := a.unmarshal([]byte(tt.in)); { case a.tt != tt.want: t.Errorf("parseTime(%q, %s) = time.Unix(%d, %d), want time.Unix(%d, %d)", tt.in, baseLabel(tt.base), a.tt.Unix(), a.tt.Nanosecond(), tt.want.Unix(), tt.want.Nanosecond()) - case (err == nil) && tt.wantErr: - t.Errorf("parseTime(%q, %s) = (time.Unix(%d, %d), nil), want non-nil error", tt.in, baseLabel(tt.base), a.tt.Unix(), a.tt.Nanosecond()) - 
case (err != nil) && !tt.wantErr: - t.Errorf("parseTime(%q, %s) error is non-nil, want nil", tt.in, baseLabel(tt.base)) + case !errors.Is(err, tt.wantErr): + t.Errorf("parseTime(%q, %s) error = %v, want %v", tt.in, baseLabel(tt.base), err, tt.wantErr) } } } diff --git a/src/encoding/json/v2/bench_test.go b/src/encoding/json/v2/bench_test.go index a46f4ab5d37b5fe30dac84021ce82820b666d570..ae4a5b20a5cd56fa98225525664eef0b0dceb3d3 100644 --- a/src/encoding/json/v2/bench_test.go +++ b/src/encoding/json/v2/bench_test.go @@ -267,12 +267,13 @@ val: new(jsonArshalerV2), new: func() any { return new(jsonArshalerV2) }, skipV1: true, }, { + /* TODO(https://go.dev/issue/71631): Re-enable this test case. name: "Duration", raw: []byte(`"1h1m1s"`), val: addr(time.Hour + time.Minute + time.Second), new: func() any { return new(time.Duration) }, skipV1: true, -}, { + }, { */ name: "Time", raw: []byte(`"2006-01-02T22:04:05Z"`), val: addr(time.Unix(1136239445, 0).UTC()), diff --git a/src/encoding/json/v2/doc.go b/src/encoding/json/v2/doc.go index 8dd0b138f5eed36dd61d068113adafd879f4f003..203139754c203449fde420f9886b0110ffdf9e38 100644 --- a/src/encoding/json/v2/doc.go +++ b/src/encoding/json/v2/doc.go @@ -9,6 +9,11 @@ // JSON is a simple data interchange format that can represent // primitive data types such as booleans, strings, and numbers, // in addition to structured data types such as objects and arrays. // +// This package (encoding/json/v2) is experimental, +// and not subject to the Go 1 compatibility promise. +// It only exists when building with the GOEXPERIMENT=jsonv2 environment variable set. +// Most users should use [encoding/json]. +// // [Marshal] and [Unmarshal] encode and decode Go values // to/from JSON text contained within a []byte. 
// [MarshalWrite] and [UnmarshalRead] operate on JSON text diff --git a/src/encoding/json/v2/example_test.go b/src/encoding/json/v2/example_test.go index fe40bff964cf842b45ca1ffccc90b280d3a11493..c6bf0a864d8385a9331f16c1335bfa0d2ae6846c 100644 --- a/src/encoding/json/v2/example_test.go +++ b/src/encoding/json/v2/example_test.go @@ -402,27 +402,29 @@ // The "format" tag option can be used to alter the formatting of certain types. func Example_formatFlags() { value := struct { - BytesBase64 []byte `json:",format:base64"` - BytesHex [8]byte `json:",format:hex"` - BytesArray []byte `json:",format:array"` - FloatNonFinite float64 `json:",format:nonfinite"` - MapEmitNull map[string]any `json:",format:emitnull"` - SliceEmitNull []any `json:",format:emitnull"` - TimeDateOnly time.Time `json:",format:'2006-01-02'"` - TimeUnixSec time.Time `json:",format:unix"` - DurationSecs time.Duration `json:",format:sec"` - DurationNanos time.Duration `json:",format:nano"` + BytesBase64 []byte `json:",format:base64"` + BytesHex [8]byte `json:",format:hex"` + BytesArray []byte `json:",format:array"` + FloatNonFinite float64 `json:",format:nonfinite"` + MapEmitNull map[string]any `json:",format:emitnull"` + SliceEmitNull []any `json:",format:emitnull"` + TimeDateOnly time.Time `json:",format:'2006-01-02'"` + TimeUnixSec time.Time `json:",format:unix"` + DurationSecs time.Duration `json:",format:sec"` + DurationNanos time.Duration `json:",format:nano"` + DurationISO8601 time.Duration `json:",format:iso8601"` }{ - BytesBase64: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, - BytesHex: [8]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, - BytesArray: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, - FloatNonFinite: math.NaN(), - MapEmitNull: nil, - SliceEmitNull: nil, - TimeDateOnly: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), - TimeUnixSec: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), - DurationSecs: 12*time.Hour + 34*time.Minute + 56*time.Second + 7*time.Millisecond 
+ 8*time.Microsecond + 9*time.Nanosecond, - DurationNanos: 12*time.Hour + 34*time.Minute + 56*time.Second + 7*time.Millisecond + 8*time.Microsecond + 9*time.Nanosecond, + BytesBase64: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, + BytesHex: [8]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, + BytesArray: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, + FloatNonFinite: math.NaN(), + MapEmitNull: nil, + SliceEmitNull: nil, + TimeDateOnly: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + TimeUnixSec: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + DurationSecs: 12*time.Hour + 34*time.Minute + 56*time.Second + 7*time.Millisecond + 8*time.Microsecond + 9*time.Nanosecond, + DurationNanos: 12*time.Hour + 34*time.Minute + 56*time.Second + 7*time.Millisecond + 8*time.Microsecond + 9*time.Nanosecond, + DurationISO8601: 12*time.Hour + 34*time.Minute + 56*time.Second + 7*time.Millisecond + 8*time.Microsecond + 9*time.Nanosecond, } b, err := json.Marshal(&value) @@ -452,7 +454,8 @@ // "SliceEmitNull": null, // "TimeDateOnly": "2000-01-01", // "TimeUnixSec": 946684800, // "DurationSecs": 45296.007008009, - // "DurationNanos": 45296007008009 + // "DurationNanos": 45296007008009, + // "DurationISO8601": "PT12H34M56.007008009S" // } } diff --git a/src/encoding/json/v2/fields.go b/src/encoding/json/v2/fields.go index 9413189c0850da49933cfaba3003e6e263c2fb1d..4a02be7327a04b9ffae1ab3e8e565b9318d9c7a7 100644 --- a/src/encoding/json/v2/fields.go +++ b/src/encoding/json/v2/fields.go @@ -404,6 +404,7 @@ // a structured set of options configuring parameters such as // the JSON member name and other features. func parseFieldOptions(sf reflect.StructField) (out fieldOptions, ignored bool, err error) { tag, hasTag := sf.Tag.Lookup("json") + tagOrig := tag // Check whether this field is explicitly ignored. 
if tag == "-" { @@ -452,6 +453,13 @@ } if !utf8.ValidString(name) { err = cmp.Or(err, fmt.Errorf("Go struct field %s has JSON object name %q with invalid UTF-8", sf.Name, name)) name = string([]rune(name)) // replace invalid UTF-8 with utf8.RuneError + } + if name == "-" && tag[0] == '-' { + defer func() { // defer to let other errors take precedence + err = cmp.Or(err, fmt.Errorf("Go struct field %s has JSON object name %q; either "+ + "use `json:\"-\"` to ignore the field or "+ + "use `json:\"'-'%s` to specify %q as the name", sf.Name, out.name, strings.TrimPrefix(strconv.Quote(tagOrig), `"-`), name)) + }() } if err2 == nil { out.hasName = true diff --git a/src/encoding/json/v2/fields_test.go b/src/encoding/json/v2/fields_test.go index 1c36f80905246fd2ec23aa44146e4786f2f8a2d8..ae58182f298ab4d05b7758a3533d7bfbb4d74033 100644 --- a/src/encoding/json/v2/fields_test.go +++ b/src/encoding/json/v2/fields_test.go @@ -503,6 +503,19 @@ }{}, wantOpts: fieldOptions{hasName: true, name: "-", quotedName: `"-"`}, wantErr: errors.New("Go struct field V has malformed `json` tag: invalid trailing ',' character"), }, { + name: jsontest.Name("DashCommaOmitEmpty"), + in: struct { + V int `json:"-,omitempty"` + }{}, + wantOpts: fieldOptions{hasName: true, name: "-", quotedName: `"-"`, omitempty: true}, + wantErr: errors.New("Go struct field V has JSON object name \"-\"; either use `json:\"-\"` to ignore the field or use `json:\"'-',omitempty\"` to specify \"-\" as the name"), + }, { + name: jsontest.Name("QuotedDashCommaOmitEmpty"), + in: struct { + V int `json:"'-',omitempty"` + }{}, + wantOpts: fieldOptions{hasName: true, name: "-", quotedName: `"-"`, omitempty: true}, + }, { name: jsontest.Name("QuotedDashName"), in: struct { V int `json:"'-'"` diff --git a/src/encoding/json/v2/options.go b/src/encoding/json/v2/options.go index 12bbdb5d86bcf32336470b54177b06823f24a78e..0942d2d30784f96a738967ccebeb53ff1cc2f521 100644 --- a/src/encoding/json/v2/options.go +++ 
b/src/encoding/json/v2/options.go @@ -257,7 +257,7 @@ func (*unmarshalersOption) JSONOptions(internal.NotForPublicUse) {} // Inject support into "jsonopts" to handle these types. func init() { - jsonopts.GetUnknownOption = func(src *jsonopts.Struct, zero jsonopts.Options) (any, bool) { + jsonopts.GetUnknownOption = func(src jsonopts.Struct, zero jsonopts.Options) (any, bool) { switch zero.(type) { case *marshalersOption: if !src.Flags.Has(jsonflags.Marshalers) { @@ -273,7 +273,7 @@ default: panic(fmt.Sprintf("unknown option %T", zero)) } } - jsonopts.JoinUnknownOption = func(dst *jsonopts.Struct, src jsonopts.Options) { + jsonopts.JoinUnknownOption = func(dst jsonopts.Struct, src jsonopts.Options) jsonopts.Struct { switch src := src.(type) { case *marshalersOption: dst.Flags.Set(jsonflags.Marshalers | 1) @@ -284,5 +284,6 @@ dst.Unmarshalers = (*Unmarshalers)(src) default: panic(fmt.Sprintf("unknown option %T", src)) } + return dst } } diff --git a/src/encoding/json/v2_decode.go b/src/encoding/json/v2_decode.go index 4b9e8509395d340a0eb4d55452f8e19cbacb4b56..c82ee903c33c112f0e972d91560742e1e0f54f15 100644 --- a/src/encoding/json/v2_decode.go +++ b/src/encoding/json/v2_decode.go @@ -199,7 +199,7 @@ stringify = true // expecting a JSON object name } n = cmp.Or(n, "0") var num []byte - val := enc.UnusedBuffer() + val := enc.AvailableBuffer() if stringify { val = append(val, '"') val = append(val, n...) 
diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go index fe814a3cfd52c0428393a93f8af0bce1581d3378..3ab20e2b5d06032074d967d281d41a513c58e121 100644 --- a/src/encoding/json/v2_decode_test.go +++ b/src/encoding/json/v2_decode_test.go @@ -1195,6 +1195,27 @@ ptr: new([]int), out: []int{1, 2, 0, 4, 5}, err: &UnmarshalTypeError{Value: "bool", Type: reflect.TypeFor[int](), Field: "2", Offset: len64(`[1,2,`)}, }, + + { + CaseName: Name("DashComma"), + in: `{"-":"hello"}`, + ptr: new(struct { + F string `json:"-,"` + }), + out: struct { + F string `json:"-,"` + }{"hello"}, + }, + { + CaseName: Name("DashCommaOmitEmpty"), + in: `{"-":"hello"}`, + ptr: new(struct { + F string `json:"-,omitempty"` + }), + out: struct { + F string `json:"-,omitempty"` + }{"hello"}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/v2_diff_test.go b/src/encoding/json/v2_diff_test.go index 871be497767b63149f8b4ab5e04fffeb9d3805cb..7a561732f4a187b4564a415a5a149e5a46f79e60 100644 --- a/src/encoding/json/v2_diff_test.go +++ b/src/encoding/json/v2_diff_test.go @@ -1038,6 +1038,7 @@ // Related issue: // // https://go.dev/issue/10275 func TestTimeDurations(t *testing.T) { + t.SkipNow() // TODO(https://go.dev/issue/71631): The default representation of time.Duration is still undecided. for _, json := range jsonPackages { t.Run(path.Join("Marshal", json.Version), func(t *testing.T) { got, err := json.Marshal(time.Minute) diff --git a/src/encoding/json/v2_encode.go b/src/encoding/json/v2_encode.go index c8f35d4281c751434d57b0d91f87afc26efdd4e5..cbb167dbd0df4091e3979ec0c46a6e04b9810109 100644 --- a/src/encoding/json/v2_encode.go +++ b/src/encoding/json/v2_encode.go @@ -68,7 +68,10 @@ // false, 0, a nil pointer, a nil interface value, and any array, // slice, map, or string of length zero. // // As a special case, if the field tag is "-", the field is always omitted. -// Note that a field with name "-" can still be generated using the tag "-,". 
+// JSON names containing commas or quotes, or names identical to "" or "-", +// can be specified using a single-quoted string literal, where the syntax +// is identical to the Go grammar for a double-quoted string literal, +// but instead uses single quotes as the delimiters. // // Examples of struct field tags and their meanings: // @@ -89,7 +92,7 @@ // // Field is ignored by this package. // Field int `json:"-"` // // // Field appears in JSON as key "-". -// Field int `json:"-,"` +// Field int `json:"'-'"` // // The "omitzero" option specifies that the field should be omitted // from the encoding if the field has a zero value, according to rules: diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go index 40b20e555258c63ccf4bbec3bdb79bd6e879aea2..4006d764ccfba0d81a91e2a980fc85908a915a8a 100644 --- a/src/encoding/json/v2_options.go +++ b/src/encoding/json/v2_options.go @@ -221,7 +221,7 @@ // - [jsontext.AllowDuplicateNames] // - [jsontext.AllowInvalidUTF8] // - [jsontext.EscapeForHTML] // - [jsontext.EscapeForJS] -// - [jsontext.PreserveRawString] +// - [jsontext.PreserveRawStrings] // // All other boolean options are set to false. // All non-boolean options are set to the zero value, diff --git a/src/go/doc/testdata/issue62640.0.golden b/src/go/doc/testdata/issue62640.0.golden new file mode 100644 index 0000000000000000000000000000000000000000..90775fd283b9ea232274b32e55d1583c8b6b1fd2 --- /dev/null +++ b/src/go/doc/testdata/issue62640.0.golden @@ -0,0 +1,22 @@ +// +PACKAGE issue62640 + +IMPORTPATH + testdata/issue62640 + +FILENAMES + testdata/issue62640.go + +TYPES + // + type E struct{} + + // F should be hidden within S because of the S.F field. 
+ func (E) F() + + // + type S struct { + E + F int + } + diff --git a/src/go/doc/testdata/issue62640.1.golden b/src/go/doc/testdata/issue62640.1.golden new file mode 100644 index 0000000000000000000000000000000000000000..90775fd283b9ea232274b32e55d1583c8b6b1fd2 --- /dev/null +++ b/src/go/doc/testdata/issue62640.1.golden @@ -0,0 +1,22 @@ +// +PACKAGE issue62640 + +IMPORTPATH + testdata/issue62640 + +FILENAMES + testdata/issue62640.go + +TYPES + // + type E struct{} + + // F should be hidden within S because of the S.F field. + func (E) F() + + // + type S struct { + E + F int + } + diff --git a/src/go/doc/testdata/issue62640.2.golden b/src/go/doc/testdata/issue62640.2.golden new file mode 100644 index 0000000000000000000000000000000000000000..6e871aa370023a52826a004b7b9a65886560ae79 --- /dev/null +++ b/src/go/doc/testdata/issue62640.2.golden @@ -0,0 +1,25 @@ +// +PACKAGE issue62640 + +IMPORTPATH + testdata/issue62640 + +FILENAMES + testdata/issue62640.go + +TYPES + // + type E struct{} + + // F should be hidden within S because of the S.F field. + func (E) F() + + // + type S struct { + E + F int + } + + // F should be hidden within S because of the S.F field. + func (S) F() + diff --git a/src/go/doc/testdata/issue62640.go b/src/go/doc/testdata/issue62640.go new file mode 100644 index 0000000000000000000000000000000000000000..f109de46fa92fe90cdfb31a419731fd7212f2b55 --- /dev/null +++ b/src/go/doc/testdata/issue62640.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue62640 + +type E struct{} + +// F should be hidden within S because of the S.F field. 
+func (E) F() {} + +type S struct { + E + F int +} diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index f5a911306fb4b8e653453ea7d047f1802076d8fc..4396b8ae89d1db0a57790e0b11811c9b094b810a 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -364,6 +364,11 @@ // go.dev/issue/47895 {`package p; import "unsafe"; type S struct { f int }; var s S; var _ = unsafe.Offsetof(s.f)`, `s.f`, `int`}, + // go.dev/issue/74303. Note that interface field types are synthetic, so + // even though `func()` doesn't appear in the source, it appears in the + // syntax tree. + {`package p; type T interface { M(int) }`, `func(int)`, `func(int)`}, + // go.dev/issue/50093 {`package u0a; func _[_ interface{int}]() {}`, `int`, `int`}, {`package u1a; func _[_ interface{~int}]() {}`, `~int`, `~int`}, diff --git a/src/go/types/interface.go b/src/go/types/interface.go index 6bcae7aef0e02f0f4b9350761d4c197cca86d59f..5f9c88d8f5c833d3a162c40c11bf4f3c9643bf99 100644 --- a/src/go/types/interface.go +++ b/src/go/types/interface.go @@ -176,19 +176,17 @@ // We have a method with name f.Names[0]. name := f.Names[0] if name.Name == "_" { check.error(name, BlankIfaceMethod, "methods must have a unique non-blank name") - continue // ignore method + continue // ignore } - // Type-check method declaration. - // Note: Don't call check.typ(f.Type) as that would record - // the method incorrectly as a type expression in Info.Types. - ftyp, _ := f.Type.(*ast.FuncType) - if ftyp == nil { - check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", f.Type) - continue // ignore method + typ := check.typ(f.Type) + sig, _ := typ.(*Signature) + if sig == nil { + if isValid(typ) { + check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ) + } + continue // ignore } - sig := new(Signature) - check.funcType(sig, nil, ftyp) // The go/parser doesn't accept method type parameters but an ast.FuncType may have them. 
if sig.tparams != nil { diff --git a/src/hash/hash.go b/src/hash/hash.go index af84e7796bdf47306b43dab2daf2fcaf4a20c31a..d4b9a91663c0de15d826467dff2ba4a81d7984e3 100644 --- a/src/hash/hash.go +++ b/src/hash/hash.go @@ -57,13 +57,14 @@ Hash Sum64() uint64 } -// A Cloner is a hash function whose state can be cloned. +// A Cloner is a hash function whose state can be cloned, returning a value with +// equivalent and independent state. // // All [Hash] implementations in the standard library implement this interface, // unless GOFIPS140=v1.0.0 is set. // -// If a hash can only determine at runtime if it can be cloned, -// (e.g., if it wraps another hash), it may return [errors.ErrUnsupported]. +// If a hash can only determine at runtime if it can be cloned (e.g. if it wraps +// another hash), it may return an error wrapping [errors.ErrUnsupported]. type Cloner interface { Hash Clone() (Cloner, error) diff --git a/src/internal/abi/iface.go b/src/internal/abi/iface.go index e1e69367c6ae39c09ad4952d5c535b1be5330952..5f3698407d1c1e0f3f5770d8e8652d1d811ba8be 100644 --- a/src/internal/abi/iface.go +++ b/src/internal/abi/iface.go @@ -26,7 +26,7 @@ Type *Type Data unsafe.Pointer } -// EmptyInterface describes the layout of an interface that contains any methods. +// NonEmptyInterface describes the layout of an interface that contains any methods. type NonEmptyInterface struct { ITab *ITab Data unsafe.Pointer diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index e36ec08a5b0232656bd3f45b61ba2079d0da813f..689ca8ce58a6f2029fc93a2976c93df5bf5104cb 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -25,7 +25,7 @@ // // (This is not necessarily the set of experiments the compiler itself // was built with.) // -// experimentBaseline specifies the experiment flags that are enabled by +// Experiment.baseline specifies the experiment flags that are enabled by // default in the current toolchain. 
This is, in effect, the "control" // configuration and any variation from this is an experiment. var Experiment ExperimentFlags = func() ExperimentFlags { @@ -54,7 +54,7 @@ // ParseGOEXPERIMENT parses a (GOOS, GOARCH, GOEXPERIMENT) // configuration tuple and returns the enabled and baseline experiment // flag sets. // -// TODO(mdempsky): Move to internal/goexperiment. +// TODO(mdempsky): Move to [internal/goexperiment]. func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { // regabiSupported is set to true on platforms where register ABI is // supported and enabled by default. diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index ceff24193d89a59bfac804faf80871bc83a4d8d6..63a338883991e0638a417a71badeb9469571722e 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -14,32 +14,32 @@ // make.bash time. // // Experiments are exposed to the build in the following ways: // -// - Build tag goexperiment.x is set if experiment x (lower case) is -// enabled. +// - Build tag goexperiment.x is set if experiment x (lower case) is +// enabled. // -// - For each experiment x (in camel case), this package contains a -// boolean constant x and an integer constant xInt. +// - For each experiment x (in camel case), this package contains a +// boolean constant x and an integer constant xInt. // -// - In runtime assembly, the macro GOEXPERIMENT_x is defined if -// experiment x (lower case) is enabled. +// - In runtime assembly, the macro GOEXPERIMENT_x is defined if +// experiment x (lower case) is enabled. // // In the toolchain, the set of experiments enabled for the current // build should be accessed via objabi.Experiment. // -// The set of experiments is included in the output of runtime.Version() +// The set of experiments is included in the output of [runtime.Version]() // and "go version " if it differs from the default experiments. 
// // For the set of experiments supported by the current toolchain, see // "go doc goexperiment.Flags". // -// Note that this package defines the set of experiments (in Flags) +// Note that this package defines the set of experiments (in [Flags]) // and records the experiments that were enabled when the package // was compiled (as boolean and integer constants). // // Note especially that this package does not itself change behavior // at run time based on the GOEXPERIMENT variable. // The code used in builds to interpret the GOEXPERIMENT variable -// is in the separate package internal/buildcfg. +// is in the separate package [internal/buildcfg]. package goexperiment //go:generate go run mkconsts.go @@ -51,7 +51,7 @@ // When specified in the GOEXPERIMENT environment variable or as build // tags, experiments use the strings.ToLower of their field name. // // For the baseline experimental configuration, see -// [internal/buildcfg.ParseGOEXPERIMENT]. +// [internal/buildcfg.Experiment]. // // If you change this struct definition, run "go generate". type Flags struct { diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go index c38b498ea7e24c6d2825b067746d0f9fb637c160..7b231d554f50d11aaae1aeb9d8a4916b6f77ae7b 100644 --- a/src/internal/reflectlite/value.go +++ b/src/internal/reflectlite/value.go @@ -43,17 +43,19 @@ // Valid when either flagIndir is set or typ.pointers() is true. ptr unsafe.Pointer // flag holds metadata about the value. - // The lowest bits are flag bits: + // + // The lowest five bits give the Kind of the value, mirroring typ.Kind(). + // + // The next set of bits are flag bits: // - flagStickyRO: obtained via unexported not embedded field, so read-only // - flagEmbedRO: obtained via unexported embedded field, so read-only // - flagIndir: val holds a pointer to the data - // - flagAddr: v.CanAddr is true (implies flagIndir) - // Value cannot represent method values. - // The next five bits give the Kind of the value. 
- // This repeats typ.Kind() except for method values. - // The remaining 23+ bits give a method number for method values. - // If flag.kind() != Func, code can assume that flagMethod is unset. + // - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil) + // - flagMethod: v is a method value. // If ifaceIndir(typ), code can assume that flagIndir is set. + // + // The remaining 22+ bits give a method number for method values. + // If flag.kind() != Func, code can assume that flagMethod is unset. flag // A method value represents a curried method invocation diff --git a/src/internal/synctest/synctest_test.go b/src/internal/synctest/synctest_test.go index 222cae2597f883fc8314a6729d4918e565fb1026..6cebf86c31f4168fc9a053ed97a254fe2f8b26eb 100644 --- a/src/internal/synctest/synctest_test.go +++ b/src/internal/synctest/synctest_test.go @@ -654,6 +654,17 @@ } }) } +// https://go.dev/issue/74386 +func TestWaitGroupRacingAdds(t *testing.T) { + synctest.Run(func() { + var wg sync.WaitGroup + for range 100 { + wg.Go(func() {}) + } + wg.Wait() + }) +} + func TestWaitGroupOutOfBubble(t *testing.T) { var wg sync.WaitGroup wg.Add(1) @@ -705,29 +716,35 @@ }) }) } -func TestWaitGroupMovedBetweenBubblesWithZeroCount(t *testing.T) { +func TestWaitGroupDisassociateInWait(t *testing.T) { var wg sync.WaitGroup synctest.Run(func() { wg.Add(1) wg.Done() + // Count and waiters are 0, so Wait disassociates the WaitGroup. + wg.Wait() }) synctest.Run(func() { - // Reusing the WaitGroup is safe, because its count is zero. + // Reusing the WaitGroup is safe, because it is no longer bubbled. wg.Add(1) wg.Done() }) } -func TestWaitGroupMovedBetweenBubblesAfterWait(t *testing.T) { +func TestWaitGroupDisassociateInAdd(t *testing.T) { var wg sync.WaitGroup synctest.Run(func() { - wg.Go(func() {}) - wg.Wait() + wg.Add(1) + go wg.Wait() + synctest.Wait() // wait for Wait to block + // Count is 0 and waiters != 0, so Done wakes the waiters and + // disassociates the WaitGroup. 
+ wg.Done() }) synctest.Run(func() { - // Reusing the WaitGroup is safe, because its count is zero. - wg.Go(func() {}) - wg.Wait() + // Reusing the WaitGroup is safe, because it is no longer bubbled. + wg.Add(1) + wg.Done() }) } diff --git a/src/internal/trace/event.go b/src/internal/trace/event.go index 21f1569f43fde511bf712482cd8f4d9230a81ef0..f31412e35d889afef2c1292b7ca59658d36781c6 100644 --- a/src/internal/trace/event.go +++ b/src/internal/trace/event.go @@ -8,6 +8,7 @@ import ( "fmt" "iter" "math" + "strconv" "strings" "time" @@ -812,6 +813,10 @@ // Kind-specific fields. switch kind := e.Kind(); kind { case EventMetric: m := e.Metric() + v := m.Value.String() + if m.Value.Kind() == ValueString { + v = strconv.Quote(v) + } fmt.Fprintf(&sb, " Name=%q Value=%s", m.Name, m.Value) case EventLabel: l := e.Label() diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go index f5e8fe79f293be13c2640872d01a270325917b53..46890e784df24cf84e7584926c26c031ae0a5e53 100644 --- a/src/internal/trace/gc.go +++ b/src/internal/trace/gc.go @@ -103,7 +103,7 @@ m := ev.Metric() if m.Name != "/sched/gomaxprocs:threads" { break } - gomaxprocs := int(m.Value.ToUint64()) + gomaxprocs := int(m.Value.Uint64()) if len(ps) > gomaxprocs { if flags&UtilPerProc != 0 { // End each P's series. 
diff --git a/src/internal/trace/testdata/testprog/gc-stress.go b/src/internal/trace/testdata/testprog/gc-stress.go index 7979234c40a5714e3b614a219cab9cc058732f9a..74b63606d5e6199f53438ea67676a0dd674ee012 100644 --- a/src/internal/trace/testdata/testprog/gc-stress.go +++ b/src/internal/trace/testdata/testprog/gc-stress.go @@ -13,6 +13,7 @@ import ( "log" "os" "runtime" + "runtime/debug" "runtime/trace" "time" ) @@ -33,14 +34,28 @@ makeTree(depth - 1), makeTree(depth - 1), makeTree(depth - 1), }, + } +} + +func initTree(n *node) { + if n == nil { + return + } + for i := range n.data { + n.data[i] = 'a' + } + for i := range n.children { + initTree(n.children[i]) } } var trees [16]*node var ballast *[16]*[1024]*node -var sink [][]byte +var sink []*node func main() { + debug.SetMemoryLimit(50 << 20) + for i := range trees { trees[i] = makeTree(6) } @@ -55,13 +70,17 @@ } } procs := runtime.GOMAXPROCS(-1) - sink = make([][]byte, procs) + sink = make([]*node, procs) for i := 0; i < procs; i++ { i := i go func() { for { - sink[i] = make([]byte, 4<<10) + sink[i] = makeTree(3) + for range 5 { + initTree(sink[i]) + runtime.Gosched() + } } }() } diff --git a/src/internal/trace/testdata/testprog/stacks.go b/src/internal/trace/testdata/testprog/stacks.go index e64bc86844c4c109794f4ba38751d27f88aec43c..478daa0d941b2bc4a8b0336e108b9fd9835ad780 100644 --- a/src/internal/trace/testdata/testprog/stacks.go +++ b/src/internal/trace/testdata/testprog/stacks.go @@ -97,6 +97,11 @@ var data [1]byte rp.Read(data[:]) pipeReadDone <- true }() + go func() { // func12 + for { + syncPreemptPoint() + } + }() time.Sleep(100 * time.Millisecond) runtime.GC() @@ -127,3 +132,12 @@ trace.Stop() runtime.GOMAXPROCS(oldGoMaxProcs) } + +//go:noinline +func syncPreemptPoint() { + if never { + syncPreemptPoint() + } +} + +var never bool diff --git a/src/internal/trace/testtrace/validation.go b/src/internal/trace/testtrace/validation.go index 
3de1e1d4bdf5ed4402c2438a52388bdce5600581..5edcf3a5b2dc0a4e18393a6bb85fb66e7e55e172 100644 --- a/src/internal/trace/testtrace/validation.go +++ b/src/internal/trace/testtrace/validation.go @@ -135,7 +135,7 @@ } switch m.Value.Kind() { case trace.ValueUint64: // Just make sure it doesn't panic. - _ = m.Value.ToUint64() + _ = m.Value.Uint64() } case trace.EventLabel: l := ev.Label() diff --git a/src/internal/trace/trace_test.go b/src/internal/trace/trace_test.go index 7eb50d0f4eafc524f2f01d7ddabeb80c630c369a..44b70553440242ba2844969191e3b6a3a0675614 100644 --- a/src/internal/trace/trace_test.go +++ b/src/internal/trace/trace_test.go @@ -326,7 +326,8 @@ // mainLine is the line number of `func main()` in testprog/stacks.go. const mainLine = 21 want := []evDesc{ {trace.EventStateTransition, "Goroutine Running->Runnable", []frame{ - {"main.main", mainLine + 82}, + {"runtime.Gosched", 0}, + {"main.main", mainLine + 87}, }}, {trace.EventStateTransition, "Goroutine NotExist->Runnable", []frame{ {"main.main", mainLine + 11}, @@ -349,7 +350,7 @@ {"main.main.func4", 0}, }}, {trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{ {"runtime.chansend1", 0}, - {"main.main", mainLine + 84}, + {"main.main", mainLine + 89}, }}, {trace.EventStateTransition, "Goroutine Running->Waiting", []frame{ {"runtime.chansend1", 0}, @@ -357,7 +358,7 @@ {"main.main.func5", 0}, }}, {trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{ {"runtime.chanrecv1", 0}, - {"main.main", mainLine + 85}, + {"main.main", mainLine + 90}, }}, {trace.EventStateTransition, "Goroutine Running->Waiting", []frame{ {"runtime.selectgo", 0}, @@ -365,7 +366,7 @@ {"main.main.func6", 0}, }}, {trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{ {"runtime.selectgo", 0}, - {"main.main", mainLine + 86}, + {"main.main", mainLine + 91}, }}, {trace.EventStateTransition, "Goroutine Running->Waiting", []frame{ {"sync.(*Mutex).Lock", 0}, @@ -382,7 +383,7 @@ }}, {trace.EventStateTransition, 
"Goroutine Waiting->Runnable", []frame{ {"sync.(*WaitGroup).Add", 0}, {"sync.(*WaitGroup).Done", 0}, - {"main.main", mainLine + 91}, + {"main.main", mainLine + 96}, }}, {trace.EventStateTransition, "Goroutine Running->Waiting", []frame{ {"sync.(*Cond).Wait", 0}, @@ -401,6 +402,10 @@ {"runtime.startTheWorld", 0}, // this is when the current gomaxprocs is logged. {"runtime.startTheWorldGC", 0}, {"runtime.GOMAXPROCS", 0}, {"main.main", 0}, + }}, + {trace.EventStateTransition, "Goroutine Running->Runnable", []frame{ + {"main.syncPreemptPoint", 0}, + {"main.main.func12", 0}, }}, } if !stress { @@ -582,13 +587,30 @@ testPath := filepath.Join("./testdata/testprog", progName) testName := progName runTest := func(t *testing.T, stress bool, extraGODEBUG string) { - // Run the program and capture the trace, which is always written to stdout. - cmd := testenv.Command(t, testenv.GoToolPath(t), "run") + // Build the program. + binFile, err := os.CreateTemp("", progName) + if err != nil { + t.Fatalf("failed to create temporary output file: %v", err) + } + bin := binFile.Name() + binFile.Close() + t.Cleanup(func() { + os.Remove(bin) + }) + buildCmd := testenv.CommandContext(t, t.Context(), testenv.GoToolPath(t), "build", "-o", bin) if race.Enabled { - cmd.Args = append(cmd.Args, "-race") + buildCmd.Args = append(buildCmd.Args, "-race") + } + buildCmd.Args = append(buildCmd.Args, testPath) + buildCmd.Env = append(os.Environ(), "GOEXPERIMENT=rangefunc") + buildOutput, err := buildCmd.CombinedOutput() + if err != nil { + t.Fatalf("failed to build %s: %v: output:\n%s", testPath, err, buildOutput) } - cmd.Args = append(cmd.Args, testPath) - cmd.Env = append(os.Environ(), "GOEXPERIMENT=rangefunc") + + // Run the program and capture the trace, which is always written to stdout. + cmd := testenv.CommandContext(t, t.Context(), bin) + // Add a stack ownership check. This is cheap enough for testing. 
godebug := "tracecheckstackownership=1" if stress { diff --git a/src/internal/trace/value.go b/src/internal/trace/value.go index bf396b6a9ee3943ac05e67678546ad5843cebc36..fc2808e59753a82da1a55081b2b45959ecd9d3ff 100644 --- a/src/internal/trace/value.go +++ b/src/internal/trace/value.go @@ -35,24 +35,27 @@ func (v Value) Kind() ValueKind { return v.kind } -// ToUint64 returns the uint64 value for a ValueUint64. +// Uint64 returns the uint64 value for a ValueUint64. // // Panics if this Value's Kind is not ValueUint64. -func (v Value) ToUint64() uint64 { +func (v Value) Uint64() uint64 { if v.kind != ValueUint64 { - panic("ToUint64 called on Value of a different Kind") + panic("Uint64 called on Value of a different Kind") } return v.scalar } -// ToString returns the uint64 value for a ValueString. -// -// Panics if this Value's Kind is not ValueString. -func (v Value) ToString() string { - if v.kind != ValueString { - panic("ToString called on Value of a different Kind") +// String returns the string value for a ValueString, and otherwise +// a string representation of the value for other kinds of values. +func (v Value) String() string { + if v.kind == ValueString { + return unsafe.String((*byte)(v.pointer), int(v.scalar)) } - return unsafe.String((*byte)(v.pointer), int(v.scalar)) + switch v.kind { + case ValueUint64: + return fmt.Sprintf("Value{Uint64(%d)}", v.Uint64()) + } + return "Value{Bad}" } func uint64Value(x uint64) Value { @@ -62,14 +65,3 @@ func stringValue(s string) Value { return Value{kind: ValueString, scalar: uint64(len(s)), pointer: unsafe.Pointer(unsafe.StringData(s))} } - -// String returns the string representation of the value. 
-func (v Value) String() string { - switch v.Kind() { - case ValueUint64: - return fmt.Sprintf("Value{Uint64(%d)}", v.ToUint64()) - case ValueString: - return fmt.Sprintf("Value{String(%s)}", v.ToString()) - } - return "Value{Bad}" -} diff --git a/src/iter/iter.go b/src/iter/iter.go index 4d408e5e77c65e54e71abdc4103055f6296fa3a2..3eaeb9e1fd6f1ff5b1d43d8dfd9d6a6573f33566 100644 --- a/src/iter/iter.go +++ b/src/iter/iter.go @@ -180,7 +180,7 @@ For example, a tree implementation might provide: // Positions returns an iterator over positions in the sequence. - func (t *Tree[V]) Positions() iter.Seq[*Pos] + func (t *Tree[V]) Positions() iter.Seq[*Pos[V]] // A Pos represents a position in the sequence. // It is only valid during the yield call it is passed to. diff --git a/src/net/http/csrf.go b/src/net/http/csrf.go index 8812a508ae21b826e98a7b5411afd324201acd7f..5e1b686fd1ccde085513d2f2dffe202487c3794a 100644 --- a/src/net/http/csrf.go +++ b/src/net/http/csrf.go @@ -136,7 +136,7 @@ default: if c.isRequestExempt(req) { return nil } - return errors.New("cross-origin request detected from Sec-Fetch-Site header") + return errCrossOriginRequest } origin := req.Header.Get("Origin") @@ -159,9 +159,14 @@ if c.isRequestExempt(req) { return nil } - return errors.New("cross-origin request detected, and/or browser is out of date: " + + return errCrossOriginRequestFromOldBrowser +} + +var ( + errCrossOriginRequest = errors.New("cross-origin request detected from Sec-Fetch-Site header") + errCrossOriginRequestFromOldBrowser = errors.New("cross-origin request detected, and/or browser is out of date: " + "Sec-Fetch-Site is missing, and Origin does not match Host") -} +) // isRequestExempt checks the bypasses which require taking a lock, and should // be deferred until the last moment. 
diff --git a/src/net/http/roundtrip_js.go b/src/net/http/roundtrip_js.go index 04c241eb4c006abe580cc23d7589489809152b00..7ae94617bcfb583cd24cd7d750cba48b777624d7 100644 --- a/src/net/http/roundtrip_js.go +++ b/src/net/http/roundtrip_js.go @@ -236,6 +236,14 @@ case <-req.Context().Done(): if !ac.IsUndefined() { // Abort the Fetch request. ac.Call("abort") + + // Wait for fetch promise to be rejected prior to exiting. See + // https://github.com/golang/go/issues/57098 for more details. + select { + case resp := <-respCh: + resp.Body.Close() + case <-errCh: + } } return nil, req.Context().Err() case resp := <-respCh: diff --git a/src/net/iprawsock.go b/src/net/iprawsock.go index 4c06b1b5aca4461840a3b076f00a1faf05ba83e2..76dded9ca16e120e6a1a97a2f0087b8f62d15011 100644 --- a/src/net/iprawsock.go +++ b/src/net/iprawsock.go @@ -24,9 +24,6 @@ // BUG(mikio): On JS and Plan 9, methods and functions related // to IPConn are not implemented. -// BUG(mikio): On Windows, the File method of IPConn is not -// implemented. - // IPAddr represents the address of an IP end point. type IPAddr struct { IP IP diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go index 1b11a03f65ca4ffb7c759eb1c72d68a7400acdc5..9d215db1b2eec35e959adda4370dd659c3bdfe8a 100644 --- a/src/net/tcpsock.go +++ b/src/net/tcpsock.go @@ -14,7 +14,7 @@ "syscall" "time" ) -// BUG(mikio): On JS and Windows, the File method of TCPConn and +// BUG(mikio): On JS, the File method of TCPConn and // TCPListener is not implemented. // TCPAddr represents the address of a TCP end point. diff --git a/src/net/udpsock.go b/src/net/udpsock.go index 56aabffa3180e4b9aa55d9f1c56b6d5e04ef9c4e..35da018c307afbf2adaa161187ea42450fde91ec 100644 --- a/src/net/udpsock.go +++ b/src/net/udpsock.go @@ -14,9 +14,6 @@ // BUG(mikio): On Plan 9, the ReadMsgUDP and // WriteMsgUDP methods of UDPConn are not implemented. -// BUG(mikio): On Windows, the File method of UDPConn is not -// implemented. 
- // BUG(mikio): On JS, methods and functions related to UDPConn are not // implemented. diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go index fecfc97d13855a355a13fda3ea3ad20d023541e1..91a6831b04f4283e1b7580bdf8f9e350ff7a2341 100644 --- a/src/os/exec/exec.go +++ b/src/os/exec/exec.go @@ -17,7 +17,7 @@ // To expand environment variables, use package os's ExpandEnv. // // Note that the examples in this package assume a Unix system. // They may not run on Windows, and they do not run in the Go Playground -// used by golang.org and godoc.org. +// used by go.dev and pkg.go.dev. // // # Executables in the current directory // diff --git a/src/os/removeall_at.go b/src/os/removeall_at.go index a613aeeb9147d49d87961bc7c0fa4e36a0dc4754..5ddc1ade6134e5a26e9b52ed92dbb111f65fabd1 100644 --- a/src/os/removeall_at.go +++ b/src/os/removeall_at.go @@ -8,6 +8,7 @@ package os import ( "io" + "runtime" "syscall" ) @@ -34,7 +35,15 @@ // RemoveAll recurses by deleting the path base from // its parent directory parentDir, base := splitPath(path) - parent, err := Open(parentDir) + flag := O_RDONLY + if runtime.GOOS == "windows" { + // On Windows, the process might not have read permission on the parent directory, + // but still can delete files in it. See https://go.dev/issue/74134. + // We can open a file even if we don't have read permission by passing the + // O_WRONLY | O_RDWR flag, which is mapped to FILE_READ_ATTRIBUTES. + flag = O_WRONLY | O_RDWR + } + parent, err := OpenFile(parentDir, flag, 0) if IsNotExist(err) { // If parent does not exist, base cannot exist. 
Fail silently return nil diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index fb1a29d0605ffe3fa22e246b23e178035d550eb3..cd3e306a5753e561dc564059b4499823ea0ca306 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -8719,6 +8719,11 @@ testTypeAssert(t, any(int(1)), int(1), true) testTypeAssert(t, any(int(1)), byte(0), false) testTypeAssert(t, fmt.Stringer(vv), vv, true) + + testTypeAssert(t, any(nil), any(nil), false) + testTypeAssert(t, any(nil), error(nil), false) + testTypeAssert(t, error(nil), any(nil), false) + testTypeAssert(t, error(nil), error(nil), false) } func testTypeAssert[T comparable, V any](t *testing.T, val V, wantVal T, wantOk bool) { diff --git a/src/reflect/value.go b/src/reflect/value.go index 68b97e922928e81f809eb685405941520830dbf2..ffdf7896482f7e189a37b724cece4bc55c146736 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -1514,46 +1514,46 @@ v = makeMethodValue("TypeAssert", v) } typ := abi.TypeFor[T]() - if typ != v.typ() { - // We can't just return false here: - // - // var zero T - // return zero, false - // - // since this function should work in the same manner as v.Interface().(T) does. - // Thus we have to handle two cases specially. - // Return the element inside the interface. - // - // T is a concrete type and v is an interface. For example: - // - // var v any = int(1) - // val := ValueOf(&v).Elem() - // TypeAssert[int](val) == val.Interface().(int) - // - // T is a interface and v is an interface, but the iface types are different. For example: - // - // var v any = &someError{} - // val := ValueOf(&v).Elem() - // TypeAssert[error](val) == val.Interface().(error) - if v.kind() == Interface { - v, ok := packIfaceValueIntoEmptyIface(v).(T) - return v, ok - } + // If v is an interface, return the element inside the interface. + // + // T is a concrete type and v is an interface. 
For example: + // + // var v any = int(1) + // val := ValueOf(&v).Elem() + // TypeAssert[int](val) == val.Interface().(int) + // + // T is a interface and v is a non-nil interface value. For example: + // + // var v any = &someError{} + // val := ValueOf(&v).Elem() + // TypeAssert[error](val) == val.Interface().(error) + // + // T is a interface and v is a nil interface value. For example: + // + // var v error = nil + // val := ValueOf(&v).Elem() + // TypeAssert[error](val) == val.Interface().(error) + if v.kind() == Interface { + v, ok := packIfaceValueIntoEmptyIface(v).(T) + return v, ok + } - // T is an interface, v is a concrete type. For example: - // - // TypeAssert[any](ValueOf(1)) == ValueOf(1).Interface().(any) - // TypeAssert[error](ValueOf(&someError{})) == ValueOf(&someError{}).Interface().(error) - if typ.Kind() == abi.Interface { - v, ok := packEface(v).(T) - return v, ok - } + // If T is an interface and v is a concrete type. For example: + // + // TypeAssert[any](ValueOf(1)) == ValueOf(1).Interface().(any) + // TypeAssert[error](ValueOf(&someError{})) == ValueOf(&someError{}).Interface().(error) + if typ.Kind() == abi.Interface { + v, ok := packEface(v).(T) + return v, ok + } + // Both v and T must be concrete types. + // The only way for an type-assertion to match is if the types are equal. + if typ != v.typ() { var zero T return zero, false } - if v.flag&flagIndir == 0 { return *(*T)(unsafe.Pointer(&v.ptr)), true } diff --git a/src/runtime/arena.go b/src/runtime/arena.go index 627c7cfdce5b8a44ac9d25b8f7c37987a64480f8..e80799581052a0979f490262f3f3a44a9a0ff401 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -1052,9 +1052,17 @@ spc := makeSpanClass(0, false) h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages) s.isUserArenaChunk = true s.elemsize -= userArenaChunkReserveBytes() - s.limit = s.base() + s.elemsize s.freeindex = 1 s.allocCount = 1 + + // Adjust s.limit down to the object-containing part of the span. 
+ // + // This is just to create a slightly tighter bound on the limit. + // It's totally OK if the garbage collector, in particular + // conservative scanning, can temporarily observe an inflated + // limit. It will simply mark the whole chunk or just skip it + // since we're in the mark phase anyway. + s.limit = s.base() + s.elemsize // Adjust size to include redzone. if asanenabled { diff --git a/src/runtime/debug.go b/src/runtime/debug.go index bdaaa7196d30bb22f5cba4d058557dd4db5e8fb7..c7592d33299681a0aed212cae61c0cb930effaed 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -39,7 +39,7 @@ } lock(&sched.lock) ret := int(gomaxprocs) - if n <= 0 || n == ret { + if n <= 0 { unlock(&sched.lock) return ret } @@ -51,6 +51,12 @@ // Wait for sysmon to complete running defaultGOMAXPROCS. lock(&computeMaxProcsLock) unlock(&computeMaxProcsLock) + + if n == ret { + // sched.customGOMAXPROCS set, but no need to actually STW + // since the gomaxprocs itself isn't changing. + return ret + } stw := stopTheWorldGC(stwGOMAXPROCS) diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index 440120cdfe8ceafdab636fdd9ab14637f2247614..a1d04d2f8a2e491e90ee317aa4ae7d2564e4619b 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -253,6 +253,14 @@ // Put the large span in the mcentral swept list so that it's // visible to the background sweeper. mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s) + + // Adjust s.limit down to the object-containing part of the span. + // + // This is just to create a slightly tighter bound on the limit. + // It's totally OK if the garbage collector, in particular + // conservative scanning, can temporarily observe an inflated + // limit. It will simply mark the whole object or just skip it + // since we're in the mark phase anyway. 
s.limit = s.base() + size s.initHeapBits() return s diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index c71ecbbcd54d66bb70b2ed68b6ad5c9f7fff6730..ec27ce25a88812bb44fc475a11a45a1014873413 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -250,13 +250,10 @@ // grow allocates a new empty span from the heap and initializes it for c's size class. func (c *mcentral) grow() *mspan { npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) - size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()]) - s := mheap_.alloc(npages, c.spanclass) if s == nil { return nil } - s.limit = s.base() + size*uintptr(s.nelems) s.initHeapBits() return s } diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go index 9d6842ae52c3a6a0a4bc63f2df8890e2adaa5db9..05f0fdb5d74ed6d58ff39af0d0f5ed6c3714273d 100644 --- a/src/runtime/mem_sbrk.go +++ b/src/runtime/mem_sbrk.go @@ -231,6 +231,7 @@ if endLen := (uintptr(p) + size + align) - end; endLen > 0 { memFree(unsafe.Pointer(end), endLen) } memCheck() + unlock(&memlock) return unsafe.Pointer(pAligned), size } diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index 44db1fb3562f6bbdbd7494d208bce5f219c0720a..2d4a54c9332022903314dd8788be462edd4bf524 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -338,11 +338,6 @@ } return false } -//go:linkname unique_runtime_blockUntilEmptyFinalizerQueue unique.runtime_blockUntilEmptyFinalizerQueue -func unique_runtime_blockUntilEmptyFinalizerQueue(timeout int64) bool { - return blockUntilEmptyFinalizerQueue(timeout) -} - // SetFinalizer sets the finalizer associated with obj to the provided // finalizer function. 
When the garbage collector finds an unreachable block // with an associated finalizer, it clears the association and runs diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 38f343164cce3dc1efbf7f57c39163dca199a205..f2df1a00e0c6839d974f80ff03cace72ad2dab1e 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -1048,7 +1048,7 @@ curgp := mp.curg // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. - casGToWaitingForGC(curgp, _Grunning, waitReasonGarbageCollection) + casGToWaitingForSuspendG(curgp, _Grunning, waitReasonGarbageCollection) // Run gc on the g0 stack. We do this so that the g stack // we're currently running on will no longer change. Cuts @@ -1522,7 +1522,8 @@ } systemstack(func() { // Mark our goroutine preemptible so its stack - // can be scanned. This lets two mark workers + // can be scanned or observed by the execution + // tracer. This, for example, lets two mark workers // scan each other (otherwise, they would // deadlock). We must not modify anything on // the G stack. However, stack shrinking is @@ -1532,7 +1533,7 @@ // // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. - casGToWaitingForGC(gp, _Grunning, waitReasonGCWorkerActive) + casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCWorkerActive) switch pp.gcMarkWorkerMode { default: throw("gcBgMarkWorker: unexpected gcMarkWorkerMode") diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 507aac748282f14d28f7ec6d37c720ad859f25c6..a136c7aeaceda245e37cbe2eee09f1b3e413fb43 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -227,7 +227,7 @@ // worker or we're in mark termination. 
userG := getg().m.curg selfScan := gp == userG && readgstatus(userG) == _Grunning if selfScan { - casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan) + casGToWaitingForSuspendG(userG, _Grunning, waitReasonGarbageCollectionScan) } // TODO: suspendG blocks (and spins) until gp @@ -682,7 +682,7 @@ throw("nwait > work.nprocs") } // gcDrainN requires the caller to be preemptible. - casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking) + casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCAssistMarking) // drain own cached work first in the hopes that it // will be more cache friendly. diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 75c347b9e93bcfc9a45a65ff0c6c72c285ca7181..ac2b1732f930265aac262ba3e3726c1a1b4e848c 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -111,6 +111,26 @@ return spanScanOwnership(atomic.Or32(o32, uint32(v)<> off) } func (imb *spanInlineMarkBits) init(class spanClass) { + if imb == nil { + // This nil check and throw is almost pointless. Normally we would + // expect imb to never be nil. However, this is called on potentially + // freshly-allocated virtual memory. As of 2025, the compiler-inserted + // nil check is not a branch but a memory read that we expect to fault + // if the pointer really is nil. + // + // However, this causes a read of the page, and operating systems may + // take it as a hint to back the accessed memory with a read-only zero + // page. However, we immediately write to this memory, which can then + // force operating systems to have to update the page table and flush + // the TLB, causing a lot of churn for programs that are short-lived + // and monotonically grow in size. + // + // This nil check is thus an explicit branch instead of what the compiler + // would insert circa 2025, which is a memory read instruction. + // + // See go.dev/issue/74375 for details. 
+ throw("runtime: span inline mark bits nil?") + } *imb = spanInlineMarkBits{} imb.class = class } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 9361089b801e18616eee72128703365c42b599ac..f25dbb429d7f78b66582fdea3bfe0428a53fd36d 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -312,8 +312,10 @@ // Reads are done atomically to find spans containing specials // during marking. pageSpecials [pagesPerArena / 8]uint8 - // pageUseSpanDartboard is a bitmap that indicates which spans are - // heap spans and also gcUsesSpanDartboard. + // pageUseSpanInlineMarkBits is a bitmap where each bit corresponds + // to a span, as only spans one page in size can have inline mark bits. + // The bit indicates that the span has a spanInlineMarkBits struct + // stored directly at the top end of the span's memory. pageUseSpanInlineMarkBits [pagesPerArena / 8]uint8 // checkmarks stores the debug.gccheckmark state. It is only @@ -1445,7 +1447,6 @@ nbytes := npages * pageSize if typ.manual() { s.manualFreeList = 0 s.nelems = 0 - s.limit = s.base() + s.npages*pageSize s.state.set(mSpanManual) } else { // We must set span properties before the span is published anywhere @@ -1485,6 +1486,9 @@ s.freeIndexForScan = 0 s.allocCache = ^uint64(0) // all 1s indicating all free. s.gcmarkBits = newMarkBits(uintptr(s.nelems)) s.allocBits = newAllocBits(uintptr(s.nelems)) + + // Adjust s.limit down to the object-containing part of the span. 
+ s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) // It's safe to access h.sweepgen without the heap lock because it's // only ever updated with the world stopped and we run on the @@ -1785,6 +1789,7 @@ span.prev = nil span.list = nil span.startAddr = base span.npages = npages + span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans span.allocCount = 0 span.spanclass = 0 span.elemsize = 0 diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 37a7b7f6849e7d8d1ec6faf76836707e617e8911..b41bbe93cf57c755274bc4b18c47e875d8635da4 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -1059,6 +1059,28 @@ sched.nmspinning.Add(1) sched.needspinning.Store(0) } +// Take a snapshot of allp, for use after dropping the P. +// +// Must be called with a P, but the returned slice may be used after dropping +// the P. The M holds a reference on the snapshot to keep the backing array +// alive. +// +//go:yeswritebarrierrec +func (mp *m) snapshotAllp() []*p { + mp.allpSnapshot = allp + return mp.allpSnapshot +} + +// Clear the saved allp snapshot. Should be called as soon as the snapshot is +// no longer required. +// +// Must be called after reacquiring a P, as it requires a write barrier. +// +//go:yeswritebarrierrec +func (mp *m) clearAllpSnapshot() { + mp.allpSnapshot = nil +} + func (mp *m) hasCgoOnStack() bool { return mp.ncgo > 0 || mp.isextra } @@ -1347,13 +1369,13 @@ gp.waitreason = reason casgstatus(gp, old, _Gwaiting) } -// casGToWaitingForGC transitions gp from old to _Gwaiting, and sets the wait reason. -// The wait reason must be a valid isWaitingForGC wait reason. +// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason. +// The wait reason must be a valid isWaitingForSuspendG wait reason. // // Use this over casgstatus when possible to ensure that a waitreason is set. 
-func casGToWaitingForGC(gp *g, old uint32, reason waitReason) { - if !reason.isWaitingForGC() { - throw("casGToWaitingForGC with non-isWaitingForGC wait reason") +func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) { + if !reason.isWaitingForSuspendG() { + throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason") } casGToWaiting(gp, old, reason) } @@ -1487,23 +1509,7 @@ semacquire(&worldsema) gp := getg() gp.m.preemptoff = reason.String() systemstack(func() { - // Mark the goroutine which called stopTheWorld preemptible so its - // stack may be scanned. - // This lets a mark worker scan us while we try to stop the world - // since otherwise we could get in a mutual preemption deadlock. - // We must not modify anything on the G stack because a stack shrink - // may occur. A stack shrink is otherwise OK though because in order - // to return from this function (and to leave the system stack) we - // must have preempted all goroutines, including any attempting - // to scan our stack, in which case, any stack shrinking will - // have already completed by the time we exit. - // - // N.B. The execution tracer is not aware of this status - // transition and handles it specially based on the - // wait reason. - casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld) stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack - casgstatus(gp, _Gwaiting, _Grunning) }) return stopTheWorldContext } @@ -1592,7 +1598,30 @@ // stopTheWorld to block. // // Returns the STW context. When starting the world, this context must be // passed to startTheWorldWithSema. +// +//go:systemstack func stopTheWorldWithSema(reason stwReason) worldStop { + // Mark the goroutine which called stopTheWorld preemptible so its + // stack may be scanned by the GC or observed by the execution tracer. 
+ // + // This lets a mark worker scan us or the execution tracer take our + // stack while we try to stop the world since otherwise we could get + // in a mutual preemption deadlock. + // + // We must not modify anything on the G stack because a stack shrink + // may occur, now that we switched to _Gwaiting, specifically if we're + // doing this during the mark phase (mark termination excepted, since + // we know that stack scanning is done by that point). A stack shrink + // is otherwise OK though because in order to return from this function + // (and to leave the system stack) we must have preempted all + // goroutines, including any attempting to scan our stack, in which + // case, any stack shrinking will have already completed by the time we + // exit. + // + // N.B. The execution tracer is not aware of this status transition and + // handles it specially based on the wait reason. + casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld) + trace := traceAcquire() if trace.ok() { trace.STWStart(reason) @@ -1699,6 +1728,9 @@ throw(bad) } worldStopped() + + // Switch back to _Grunning, now that the world is stopped. + casgstatus(getg().m.curg, _Gwaiting, _Grunning) return worldStop{ reason: reason, @@ -2068,15 +2100,23 @@ // part of the current goroutine's stack, since the GC may move it. func forEachP(reason waitReason, fn func(*p)) { systemstack(func() { gp := getg().m.curg - // Mark the user stack as preemptible so that it may be scanned. - // Otherwise, our attempt to force all P's to a safepoint could - // result in a deadlock as we attempt to preempt a worker that's - // trying to preempt us (e.g. for a stack scan). + // Mark the user stack as preemptible so that it may be scanned + // by the GC or observed by the execution tracer. Otherwise, our + // attempt to force all P's to a safepoint could result in a + // deadlock as we attempt to preempt a goroutine that's trying + // to preempt us (e.g. for a stack scan). 
+ // + // We must not modify anything on the G stack because a stack shrink + // may occur. A stack shrink is otherwise OK though because in order + // to return from this function (and to leave the system stack) we + // must have preempted all goroutines, including any attempting + // to scan our stack, in which case, any stack shrinking will + // have already completed by the time we exit. // // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. - casGToWaitingForGC(gp, _Grunning, reason) + casGToWaitingForSuspendG(gp, _Grunning, reason) forEachPInternal(fn) casgstatus(gp, _Gwaiting, _Grunning) }) @@ -3289,10 +3329,10 @@ // the world. tryRecordGoroutineProfile(gp, nil, osyield) } - // Assign gp.m before entering _Grunning so running Gs have an - // M. + // Assign gp.m before entering _Grunning so running Gs have an M. mp.curg = gp gp.m = mp + gp.syncSafePoint = false // Clear the flag, which may have been set by morestack. casgstatus(gp, _Grunnable, _Grunning) gp.waitsince = 0 gp.preempt = false @@ -3328,6 +3368,11 @@ // findrunnable would return a G to run, handoffp must start // an M. top: + // We may have collected an allp snapshot below. The snapshot is only + // required in each loop iteration. Clear it to allow GC to collect the + // slice. + mp.clearAllpSnapshot() + pp := mp.p.ptr() if sched.gcwaiting.Load() { gcstopm() @@ -3509,7 +3554,11 @@ // Before we drop our P, make a snapshot of the allp slice, // which can change underfoot once we no longer block // safe-points. We don't need to snapshot the contents because // everything up to cap(allp) is immutable. - allpSnapshot := allp + // + // We clear the snapshot from the M after return via + // mp.clearAllpSnapshot (in schedule) and on each iteration of the top + // loop. + allpSnapshot := mp.snapshotAllp() // Also snapshot masks. Value changes are OK, but we can't allow // len to change out from under us. 
idlepMaskSnapshot := idlepMask @@ -3649,6 +3698,9 @@ // adjusttimers which may need to allocate memory, and that isn't // allowed when we don't have an active P. pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil) } + + // We don't need allp anymore at this point, but can't clear the + // snapshot without a P for the write barrier. // Poll network until next timer. if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 { @@ -4084,6 +4136,11 @@ throw("schedule: spinning with local work") } gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available + + // findRunnable may have collected an allp snapshot. The snapshot is + // only required within findRunnable. Clear it to allow GC to collect the + // slice. + mp.clearAllpSnapshot() if debug.dontfreezetheworld > 0 && freezing.Load() { // See comment in freezetheworld. We don't want to perturb diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 2286868567a322e735b3e9a380685a58fefdbf39..19ad29c12717d277bc0189fa54efc5048847a15c 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -528,11 +528,12 @@ got, err := cmd.CombinedOutput() t.Logf("gdb output:\n%s", got) if err != nil { + noProcessRE := regexp.MustCompile(`Couldn't get [a-zA-Z_ -]* ?registers: No such process\.`) switch { case bytes.Contains(got, []byte("internal-error: wait returned unexpected status 0x0")): // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=28551 testenv.SkipFlaky(t, 43068) - case bytes.Contains(got, []byte("Couldn't get registers: No such process.")), + case noProcessRE.Match(got), bytes.Contains(got, []byte("Unable to fetch general registers.: No such process.")), bytes.Contains(got, []byte("reading register pc (#64): No such process.")): // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=9086 diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 
d1b31be172e65f586fdf4995fbd3a9bbbb915522..527611f96a29d9beac11acf65bb1d4d0e49cd95e 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -466,6 +466,7 @@ trackingStamp int64 // timestamp of when the G last started being tracked runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking lockedm muintptr fipsIndicator uint8 + syncSafePoint bool // set if g is stopped at a synchronous safe point. runningCleanups atomic.Bool sig uint32 writebuf []byte @@ -564,6 +565,7 @@ freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) needextram bool g0StackAccurate bool // whether the g0 stack has accurate bounds traceback uint8 + allpSnapshot []*p // Snapshot of allp for use after dropping P in findRunnable, nil otherwise. ncgocall uint64 // number of cgo calls in total ncgo int32 // number of cgo calls currently in progress cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily @@ -1163,17 +1165,17 @@ w == waitReasonSyncRWMutexRLock || w == waitReasonSyncRWMutexLock } -func (w waitReason) isWaitingForGC() bool { - return isWaitingForGC[w] +func (w waitReason) isWaitingForSuspendG() bool { + return isWaitingForSuspendG[w] } -// isWaitingForGC indicates that a goroutine is only entering _Gwaiting and -// setting a waitReason because it needs to be able to let the GC take ownership -// of its stack. The G is always actually executing on the system stack, in -// these cases. +// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and +// setting a waitReason because it needs to be able to let the suspendG +// (used by the GC and the execution tracer) take ownership of its stack. +// The G is always actually executing on the system stack in these cases. // // TODO(mknyszek): Consider replacing this with a new dedicated G status. 
-var isWaitingForGC = [len(waitReasonStrings)]bool{ +var isWaitingForSuspendG = [len(waitReasonStrings)]bool{ waitReasonStoppingTheWorld: true, waitReasonGCMarkTermination: true, waitReasonGarbageCollection: true, diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 7e69d65fbb7e501920399e6bd8db841d2805cb29..a338708d76fca83dcefd268d9200978aeb74fb3e 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -1115,6 +1115,9 @@ gp.preemptShrink = false shrinkstack(gp) } + // Set a flag indicating that we've been synchronously preempted. + gp.syncSafePoint = true + if gp.preemptStop { preemptPark(gp) // never returns } @@ -1212,14 +1215,14 @@ if gp.parkingOnChan.Load() { return false } // We also can't copy the stack while tracing is enabled, and - // gp is in _Gwaiting solely to make itself available to the GC. + // gp is in _Gwaiting solely to make itself available to suspendG. // In these cases, the G is actually executing on the system // stack, and the execution tracer may want to take a stack trace // of the G's stack. Note: it's safe to access gp.waitreason here. // We're only checking if this is true if we took ownership of the // G with the _Gscan bit. This prevents the goroutine from transitioning, // which prevents gp.waitreason from changing. - if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() { + if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() { return false } return true diff --git a/src/runtime/testdata/testprog/gomaxprocs.go b/src/runtime/testdata/testprog/gomaxprocs.go index 915e3c4dad4e12879f9a4f7d88340448aab5d86a..99bc9f1dbb350a50dcc4559973a2ff53a460ac05 100644 --- a/src/runtime/testdata/testprog/gomaxprocs.go +++ b/src/runtime/testdata/testprog/gomaxprocs.go @@ -133,6 +133,20 @@ runtime.GOMAXPROCS(3) mustSetCPUMax(path, 200000) mustNotChangeMaxProcs(3) + // Re-enable updates. Change is immediately visible. 
+ runtime.SetDefaultGOMAXPROCS() + procs = runtime.GOMAXPROCS(0) + println("GOMAXPROCS:", procs) + if procs != 2 { + panic(fmt.Sprintf("GOMAXPROCS got %d want %d", procs, 2)) + } + + // Setting GOMAXPROCS to itself also disables updates, despite not + // changing the value itself. + runtime.GOMAXPROCS(runtime.GOMAXPROCS(0)) + mustSetCPUMax(path, 300000) + mustNotChangeMaxProcs(2) + println("OK") } diff --git a/src/runtime/testdata/testprogcgo/needmdeadlock.go b/src/runtime/testdata/testprogcgo/needmdeadlock.go index b95ec7746895b2f8c433036dad3ff639a1f368cc..f4710488c9e16300801b8fe755a1339c73262914 100644 --- a/src/runtime/testdata/testprogcgo/needmdeadlock.go +++ b/src/runtime/testdata/testprogcgo/needmdeadlock.go @@ -70,8 +70,6 @@ import "C" import ( "fmt" - "os" - "time" ) func init() { @@ -84,12 +82,8 @@ } func NeedmDeadlock() { // The failure symptom is that the program hangs because of a - // deadlock in needm, so set an alarm. - go func() { - time.Sleep(5 * time.Second) - fmt.Println("Hung for 5 seconds") - os.Exit(1) - }() + // deadlock in needm. Instead of using an arbitrary timeout, + // we let the test deadline expire if it deadlocks. C.runNeedmSignalThread() fmt.Println("OK") diff --git a/src/runtime/trace.go b/src/runtime/trace.go index 139cbba6a9f667f4ff020ecd3b8bb95389a9b3e0..b92e7b4e8e36569b89deae70c77b004da6853733 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -331,7 +331,7 @@ // altogether instead of advancing to the next generation. // // traceAdvanceSema must not be held. // -// traceAdvance is called by golang.org/x/exp/trace using linkname. +// traceAdvance is called by runtime/trace and golang.org/x/exp/trace using linkname. // //go:linkname traceAdvance func traceAdvance(stopTrace bool) { @@ -376,7 +376,7 @@ systemstack(func() { me := getg().m.curg // We don't have to handle this G status transition because we // already eliminated ourselves from consideration above. 
- casGToWaitingForGC(me, _Grunning, waitReasonTraceGoroutineStatus) + casGToWaitingForSuspendG(me, _Grunning, waitReasonTraceGoroutineStatus) // We need to suspend and take ownership of the G to safely read its // goid. Note that we can't actually emit the event at this point // because we might stop the G in a window where it's unsafe to write @@ -956,7 +956,7 @@ // traceReaderAvailable returns the trace reader if it is not currently // scheduled and should be. Callers should first check that // (traceEnabled() || traceShuttingDown()) is true. func traceReaderAvailable() *g { - // There are three conditions under which we definitely want to schedule + // There are two conditions under which we definitely want to schedule // the reader: // - The reader is lagging behind in finishing off the last generation. // In this case, trace buffers could even be empty, but the trace @@ -965,12 +965,10 @@ // to schedule the reader ASAP. // - The reader has pending work to process for it's reader generation // (assuming readerGen is not lagging behind). Note that we also want // to be careful *not* to schedule the reader if there's no work to do. - // - The trace is shutting down. The trace stopper blocks on the reader - // to finish, much like trace advancement. // // We also want to be careful not to schedule the reader if there's no // reason to. - if trace.flushedGen.Load() == trace.readerGen.Load() || trace.workAvailable.Load() || trace.shutdown.Load() { + if trace.flushedGen.Load() == trace.readerGen.Load() || trace.workAvailable.Load() { return trace.reader.Load() } return nil diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go index 39adeb4c07ea37608478c86dc0ef308ab5b04a11..a2775a3427194373f47cd362d75de3495942db85 100644 --- a/src/runtime/traceruntime.go +++ b/src/runtime/traceruntime.go @@ -457,7 +457,7 @@ } // GoStop emits a GoStop event with the provided reason. 
func (tl traceLocker) GoStop(reason traceGoStopReason) { - tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1)) + tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0)) } // GoPark emits a GoBlock event with the provided reason. diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go index bca2d0a88deec4be42a9549280f29c172d3c9f1f..2ee68c85f0e80c2a19e9afca3eb34f3a5c586769 100644 --- a/src/runtime/tracestack.go +++ b/src/runtime/tracestack.go @@ -109,7 +109,22 @@ pcBuf[1] = gp.syscallpc nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.syscallbp), pcBuf[2:]) } else { pcBuf[1] = gp.sched.pc - nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:]) + if gp.syncSafePoint { + // We're stopped in morestack, which is an odd state because gp.sched.bp + // refers to our parent frame, since we haven't had the chance to push our + // frame pointer to the stack yet. If we just start walking from gp.sched.bp, + // we'll skip a frame as a result. Luckily, we can find the PC we want right + // at gp.sched.sp on non-LR platforms, and we have it directly on LR platforms. + // See issue go.dev/issue/68090. 
+ if usesLR { + pcBuf[2] = gp.sched.lr + } else { + pcBuf[2] = *(*uintptr)(unsafe.Pointer(gp.sched.sp)) + } + nstk += 2 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[3:]) + } else { + nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:]) + } } } } diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go index 4dabc8e562f3de41d17c0ef6bdd87e0e740e29e9..03ec81fc0262a1f496c471884321a401aaa0a256 100644 --- a/src/runtime/tracestatus.go +++ b/src/runtime/tracestatus.go @@ -126,11 +126,12 @@ case _Gwaiting, _Gpreempted: // There are a number of cases where a G might end up in // _Gwaiting but it's actually running in a non-preemptive // state but needs to present itself as preempted to the - // garbage collector. In these cases, we're not going to - // emit an event, and we want these goroutines to appear in - // the final trace as if they're running, not blocked. + // garbage collector and traceAdvance (via suspendG). In + // these cases, we're not going to emit an event, and we + // want these goroutines to appear in the final trace as + // if they're running, not blocked. tgs = tracev2.GoWaiting - if status == _Gwaiting && wr.isWaitingForGC() { + if status == _Gwaiting && wr.isWaitingForSuspendG() { tgs = tracev2.GoRunning } case _Gdead: diff --git a/src/runtime/tracetime.go b/src/runtime/tracetime.go index 7ffab79badb5399ab61c06306a03dd261b86a76f..8be5c3d1306201b3670e73941419d7245c7d890e 100644 --- a/src/runtime/tracetime.go +++ b/src/runtime/tracetime.go @@ -51,7 +51,7 @@ // // nosplit because it's called from exitsyscall and various trace writing functions, // which are nosplit. // -// traceClockNow is called by golang.org/x/exp/trace using linkname. +// traceClockNow is called by runtime/trace and golang.org/x/exp/trace using linkname. 
// //go:linkname traceClockNow //go:nosplit diff --git a/src/slices/slices_test.go b/src/slices/slices_test.go index edf7e7b610e0230aaa4593b387a457d9debdca85..19a3e9b0ddbdb715bdbb07b3fdd2ae8c9da21b57 100644 --- a/src/slices/slices_test.go +++ b/src/slices/slices_test.go @@ -1454,6 +1454,8 @@ } } } +var leak *int + func TestIssue68488(t *testing.T) { s := make([]int, 3) clone := Clone(s[1:1]) @@ -1461,6 +1463,7 @@ switch unsafe.SliceData(clone) { case &s[0], &s[1], &s[2]: t.Error("clone keeps alive s due to array overlap") } + leak = &s[1] // see go.dev/issue/74387 } // This test asserts the behavior when the primary slice operand is nil. diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go index 0bd618a241bb0b44b6e73b220fa2995670a0fbbe..5b035aa3967ad35e632d6acd5548b657eff1bdd5 100644 --- a/src/sync/waitgroup.go +++ b/src/sync/waitgroup.go @@ -120,13 +120,6 @@ } if w != 0 && delta > 0 && v == int32(delta) { panic("sync: WaitGroup misuse: Add called concurrently with Wait") } - if v == 0 && bubbled { - // Disassociate the WaitGroup from its bubble. - synctest.Disassociate(wg) - if w == 0 { - wg.state.Store(0) - } - } if v > 0 || w == 0 { return } @@ -140,6 +133,11 @@ panic("sync: WaitGroup misuse: Add called concurrently with Wait") } // Reset waiters count to 0. wg.state.Store(0) + if bubbled { + // Adds must not happen concurrently with wait when counter is 0, + // so we can safely disassociate wg from its current bubble. + synctest.Disassociate(wg) + } for ; w != 0; w-- { runtime_Semrelease(&wg.sema, false, 0) } @@ -166,12 +164,19 @@ } for { state := wg.state.Load() v := int32(state >> 32) - w := uint32(state) + w := uint32(state & 0x7fffffff) if v == 0 { // Counter is 0, no need to wait. if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(wg)) + } + if w == 0 && state&waitGroupBubbleFlag != 0 && synctest.IsAssociated(wg) { + // Adds must not happen concurrently with wait when counter is 0, + // so we can disassociate wg from its current bubble. 
+ if wg.state.CompareAndSwap(state, 0) { + synctest.Disassociate(wg) + } } return } diff --git a/src/testing/synctest/helper_test.go b/src/testing/synctest/helper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7547d3eac6991d218a687c99ce477aeb8c805815 --- /dev/null +++ b/src/testing/synctest/helper_test.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package synctest_test + +import "testing" + +// helperLog is a t.Helper which logs. +// Since it is a helper, the log prefix should contain +// the caller's file, not helper_test.go. +func helperLog(t *testing.T, s string) { + t.Helper() + t.Log(s) +} diff --git a/src/testing/synctest/synctest_test.go b/src/testing/synctest/synctest_test.go index 822fd6fe1c72c42aa7458b63a403ccb99140c724..9c731787509fecab2869ad668f481936d3ccaaad 100644 --- a/src/testing/synctest/synctest_test.go +++ b/src/testing/synctest/synctest_test.go @@ -140,6 +140,18 @@ }) }) } +func TestHelper(t *testing.T) { + runTest(t, []string{"-test.v"}, func() { + synctest.Test(t, func(t *testing.T) { + helperLog(t, "log in helper") + }) + }, `^=== RUN TestHelper + synctest_test.go:.* log in helper +--- PASS: TestHelper.* +PASS +$`) +} + func wantPanic(t *testing.T, want string) { if e := recover(); e != nil { if got := fmt.Sprint(e); got != want { diff --git a/src/testing/testing.go b/src/testing/testing.go index b2d4c0c938af813129a9e115a1af59265366c849..3475bfca4a6976cdd2d2e0a80702583d2585ab13 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -1261,6 +1261,9 @@ // Helper marks the calling function as a test helper function. // When printing file and line information, that function will be skipped. // Helper may be called simultaneously from multiple goroutines. 
func (c *common) Helper() { + if c.isSynctest { + c = c.parent + } c.mu.Lock() defer c.mu.Unlock() if c.helperPCs == nil { diff --git a/test/fixedbugs/issue74379.go b/test/fixedbugs/issue74379.go new file mode 100644 index 0000000000000000000000000000000000000000..e516505fbe54078a5330e369f1607e27a8ae4e33 --- /dev/null +++ b/test/fixedbugs/issue74379.go @@ -0,0 +1,30 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "fmt" + "os" +) + +func crashOnErr(err error) bool { + if err != nil { + panic(err) + } + return false +} + +func main() { + defer func() { + if recover() == nil { + fmt.Println("failed to have expected panic") + os.Exit(1) + } + }() + fmt.Println(crashOnErr(errors.New("test error"))) +} diff --git a/test/fixedbugs/issue74379b.go b/test/fixedbugs/issue74379b.go new file mode 100644 index 0000000000000000000000000000000000000000..2603587914ac51321846329a5aa7ee0a0c4b61e5 --- /dev/null +++ b/test/fixedbugs/issue74379b.go @@ -0,0 +1,32 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "fmt" + "os" +) + +func crashOnErr(err error) int { + if err != nil { + panic(err) + } + return 10 +} + +func main() { + defer func() { + if recover() == nil { + fmt.Println("failed to have expected panic") + os.Exit(1) + } + }() + + s := make([]int, crashOnErr(errors.New("test error"))) + println("unreachable: len(s) =", len(s)) +} diff --git a/test/fixedbugs/issue74379c.go b/test/fixedbugs/issue74379c.go new file mode 100644 index 0000000000000000000000000000000000000000..871307bf89a4e9eb35ae13e31ab851b706bb0cdf --- /dev/null +++ b/test/fixedbugs/issue74379c.go @@ -0,0 +1,54 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "fmt" + "os" +) + +type S struct{ a, b int } + +func crashOnErr1(err error) S { + if err != nil { + panic(err) + } + return S{} // zero value struct +} + +func f1() { + defer func() { + if recover() == nil { + fmt.Println("failed to have expected panic") + os.Exit(1) + } + }() + fmt.Println(crashOnErr1(errors.New("test error"))) +} + +func crashOnErr2(err error) S { + if err != nil { + panic(err) + } + return S{1, 2} // not zero value struct +} + +func f2() { + defer func() { + if recover() == nil { + fmt.Println("failed to have expected panic") + os.Exit(1) + } + }() + fmt.Println(crashOnErr2(errors.New("test error"))) +} + +func main() { + f1() + f2() +}