diff --git a/go.mod b/go.mod index 2e3caed..01c4d36 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( golang.org/x/exp v0.0.0-20230809094429-853ea248256d golang.org/x/mod v0.23.0 - golang.org/x/tools v0.29.0 + golang.org/x/tools v0.30.0 ) -require golang.org/x/sync v0.10.0 // indirect +require golang.org/x/sync v0.11.0 // indirect diff --git a/go.sum b/go.sum index 52369f9..2243692 100644 --- a/go.sum +++ b/go.sum @@ -4,7 +4,7 @@ golang.org/x/exp v0.0.0-20230809094429-853ea248256d h1:wu5bD43Ana/nF1ZmaLr3lW/FQ golang.org/x/exp v0.0.0-20230809094429-853ea248256d/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee..b832259 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go index 3cc2bec..775fd20 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go +++ b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go @@ -16,6 +16,7 @@ import ( "path/filepath" "regexp" "runtime" + "slices" "sort" "strconv" "strings" @@ -35,6 +36,12 @@ import ( // and populates it with a GOPATH-style project using filemap (which // maps file names to contents). On success it returns the name of the // directory and a cleanup function to delete it. +// +// TODO(adonovan): provide a newer version that accepts a testing.T, +// calls T.TempDir, and calls T.Fatal on any error, avoiding the need +// to return cleanup or err: +// +// func WriteFilesToTmp(t *testing.T filemap map[string]string) string func WriteFiles(filemap map[string]string) (dir string, cleanup func(), err error) { gopath, err := os.MkdirTemp("", "analysistest") if err != nil { @@ -167,50 +174,27 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns act := result.Action // file -> message -> edits + // TODO(adonovan): this mapping assumes fix.Messages are unique across analyzers, + // whereas they are only unique within a given Diagnostic. 
fileEdits := make(map[*token.File]map[string][]diff.Edit) - fileContents := make(map[*token.File][]byte) - // Validate edits, prepare the fileEdits map and read the file contents. + // We may assume that fixes are validated upon creation in Pass.Report. + // Group fixes by file and message. for _, diag := range act.Diagnostics { for _, fix := range diag.SuggestedFixes { - // Assert that lazy fixes have a Category (#65578, #65087). if inTools && len(fix.TextEdits) == 0 && diag.Category == "" { t.Errorf("missing Diagnostic.Category for SuggestedFix without TextEdits (gopls requires the category for the name of the fix command") } for _, edit := range fix.TextEdits { - start, end := edit.Pos, edit.End - if !end.IsValid() { - end = start - } - // Validate the edit. - if start > end { - t.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)", - act.Analyzer.Name, start, end) - continue - } - file, endfile := act.Package.Fset.File(start), act.Package.Fset.File(end) - if file == nil || endfile == nil || file != endfile { - t.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v", - act.Analyzer.Name, file.Name(), endfile.Name()) - continue - } - if _, ok := fileContents[file]; !ok { - contents, err := os.ReadFile(file.Name()) - if err != nil { - t.Errorf("error reading %s: %v", file.Name(), err) - } - fileContents[file] = contents - } + file := act.Package.Fset.File(edit.Pos) if _, ok := fileEdits[file]; !ok { fileEdits[file] = make(map[string][]diff.Edit) } fileEdits[file][fix.Message] = append(fileEdits[file][fix.Message], diff.Edit{ - Start: file.Offset(start), - End: file.Offset(end), + Start: file.Offset(edit.Pos), + End: file.Offset(edit.End), New: string(edit.NewText), }) } @@ -219,9 +203,10 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns for file, fixes := range fileEdits { // Get the original file contents. - orig, ok := fileContents[file] - if !ok { - t.Errorf("could not find file contents for %s", file.Name()) + // TODO(adonovan): plumb pass.ReadFile. + orig, err := os.ReadFile(file.Name()) + if err != nil { + t.Errorf("error reading %s: %v", file.Name(), err) continue } @@ -242,8 +227,15 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns t.Errorf("%s.golden has leading comment; we don't know what to do with it", file.Name()) continue } - - for sf, edits := range fixes { + // Sort map keys for determinism in tests. + // TODO(jba): replace with slices.Sorted(maps.Keys(fixes)) when go.mod >= 1.23. + var keys []string + for k := range fixes { + keys = append(keys, k) + } + slices.Sort(keys) + for _, sf := range keys { + edits := fixes[sf] found := false for _, vf := range ar.Files { if vf.Name == sf { @@ -266,10 +258,17 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns } } else { // all suggested fixes are represented by a single file - + // TODO(adonovan): fix: this makes no sense if len(fixes) > 1. + // Sort map keys for determinism in tests. + // TODO(jba): replace with slices.Sorted(maps.Keys(fixes)) when go.mod >= 1.23. + var keys []string + for k := range fixes { + keys = append(keys, k) + } + slices.Sort(keys) var catchallEdits []diff.Edit - for _, edits := range fixes { - catchallEdits = append(catchallEdits, edits...) + for _, k := range keys { + catchallEdits = append(catchallEdits, fixes[k]...) 
} if err := applyDiffsAndCompare(orig, ar.Comment, catchallEdits, file.Name()); err != nil { diff --git a/vendor/golang.org/x/tools/go/analysis/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/checker/checker.go index 5935a62..502ec92 100644 --- a/vendor/golang.org/x/tools/go/analysis/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/checker/checker.go @@ -35,6 +35,7 @@ import ( "go/types" "io" "log" + "os" "reflect" "sort" "strings" @@ -55,9 +56,10 @@ type Options struct { SanityCheck bool // check fact encoding is ok and deterministic FactLog io.Writer // if non-nil, log each exported fact to it - // TODO(adonovan): add ReadFile so that an Overlay specified + // TODO(adonovan): expose ReadFile so that an Overlay specified // in the [packages.Config] can be communicated via // Pass.ReadFile to each Analyzer. + readFile analysisinternal.ReadFileFunc } // Graph holds the results of a round of analysis, including the graph @@ -335,8 +337,14 @@ func (act *Action) execOnce() { TypeErrors: act.Package.TypeErrors, Module: module, - ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) }, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { + // Assert that SuggestedFixes are well formed. + if err := analysisinternal.ValidateFixes(act.Package.Fset, act.Analyzer, d.SuggestedFixes); err != nil { + panic(err) + } + act.Diagnostics = append(act.Diagnostics, d) + }, ImportObjectFact: act.ObjectFact, ExportObjectFact: act.exportObjectFact, ImportPackageFact: act.PackageFact, @@ -344,7 +352,11 @@ func (act *Action) execOnce() { AllObjectFacts: act.AllObjectFacts, AllPackageFacts: act.AllPackageFacts, } - pass.ReadFile = analysisinternal.MakeReadFile(pass) + readFile := os.ReadFile + if act.opts.readFile != nil { + readFile = act.opts.readFile + } + pass.ReadFile = analysisinternal.CheckedReadFile(pass, readFile) act.pass = pass act.Result, act.Err = func() (any, error) { diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index ee083a2..f6118be 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -65,7 +65,9 @@ type RelatedInformation struct { // user can choose to apply to their code. Usually the SuggestedFix is // meant to fix the issue flagged by the diagnostic. // -// The TextEdits must not overlap, nor contain edits for other packages. +// The TextEdits must not overlap, nor contain edits for other +// packages. Edits need not be totally ordered, but the order +// determines how insertions at the same point will be applied. type SuggestedFix struct { // A verb phrase describing the fix, to be shown to // a user trying to decide whether to accept it. 
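As an illustration of the ordering guarantee documented above for SuggestedFix (edits need not be totally ordered, but their order decides how insertions at the same point are applied), a minimal sketch, not code from this change; pos is a hypothetical insertion point inside a parameter list:

    fix := analysis.SuggestedFix{
        Message: "add parameters",
        TextEdits: []analysis.TextEdit{
            // Both edits insert at the same position; per the updated
            // documentation, the first listed edit appears first in the result.
            {Pos: pos, End: pos, NewText: []byte("ctx context.Context, ")},
            {Pos: pos, End: pos, NewText: []byte("w io.Writer, ")},
        },
    }
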
diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go index 0c2fc5e..fb3c47b 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -17,9 +17,9 @@ import ( "flag" "fmt" "go/format" - "go/token" "io" - "io/ioutil" + "maps" + "log" "os" "runtime" @@ -31,10 +31,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/checker" + "golang.org/x/tools/go/analysis/internal" "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/diff" - "golang.org/x/tools/internal/robustio" ) var ( @@ -54,8 +55,12 @@ var ( // IncludeTests indicates whether test files should be analyzed too. IncludeTests = true - // Fix determines whether to apply all suggested fixes. + // Fix determines whether to apply (!Diff) or display (Diff) all suggested fixes. Fix bool + + // Diff causes the file updates to be displayed, but not applied. + // This flag has no effect unless Fix is true. + Diff bool ) // RegisterFlags registers command-line flags used by the analysis driver. @@ -71,6 +76,7 @@ func RegisterFlags() { flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too") flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes") + flag.BoolVar(&Diff, "diff", false, "with -fix, don't update the files, but print a unified diff") } // Run loads the packages specified by args using go/packages, @@ -139,6 +145,9 @@ func Run(args []string, analyzers []*analysis.Analyzer) int { return 1 } + // TODO(adonovan): simplify exit code logic by using a single + // exit code variable and applying "code = max(code, X)" each + // time an error of code X occurs. pkgsExitCode := 0 // Print package and module errors regardless of RunDespiteErrors. // Do not exit if there are errors, yet. @@ -166,13 +175,18 @@ func Run(args []string, analyzers []*analysis.Analyzer) int { return 1 } - // Apply all fixes from the root actions. + // Don't print the diagnostics, + // but apply all fixes from the root actions. if Fix { - if err := applyFixes(graph.Roots); err != nil { + if err := applyFixes(graph.Roots, Diff); err != nil { // Fail when applying fixes failed. log.Print(err) return 1 } + // TODO(adonovan): don't proceed to print the text or JSON output + // if we applied fixes; stop here. + // + // return pkgsExitCode } // Print the results. If !RunDespiteErrors and there @@ -261,7 +275,13 @@ func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { } mode |= packages.NeedModule conf := packages.Config{ - Mode: mode, + Mode: mode, + // Ensure that child process inherits correct alias of PWD. + // (See discussion at Dir field of [exec.Command].) + // However, this currently breaks some tests. + // TODO(adonovan): Investigate. + // + // Dir: os.Getenv("PWD"), Tests: IncludeTests, } initial, err := packages.Load(&conf, patterns...) @@ -271,181 +291,257 @@ func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { return initial, err } -// applyFixes applies suggested fixes associated with diagnostics -// reported by the specified actions. It verifies that edits do not -// conflict, even through file-system level aliases such as symbolic -// links, and then edits the files. 
-func applyFixes(actions []*checker.Action) error { - // Visit all of the actions and accumulate the suggested edits. - paths := make(map[robustio.FileID]string) - editsByAction := make(map[robustio.FileID]map[*checker.Action][]diff.Edit) +// applyFixes attempts to apply the first suggested fix associated +// with each diagnostic reported by the specified actions. +// All fixes must have been validated by [analysisinternal.ValidateFixes]. +// +// Each fix is treated as an independent change; fixes are merged in +// an arbitrary deterministic order as if by a three-way diff tool +// such as the UNIX diff3 command or 'git merge'. Any fix that cannot be +// cleanly merged is discarded, in which case the final summary tells +// the user to re-run the tool. +// TODO(adonovan): make the checker tool re-run the analysis itself. +// +// When the same file is analyzed as a member of both a primary +// package "p" and a test-augmented package "p [p.test]", there may be +// duplicate diagnostics and fixes. One set of fixes will be applied +// and the other will be discarded; but re-running the tool may then +// show zero fixes, which may cause the confused user to wonder what +// happened to the other ones. +// TODO(adonovan): consider pre-filtering completely identical fixes. +// +// A common reason for overlapping fixes is duplicate additions of the +// same import. The merge algorithm may often cleanly resolve such +// fixes, coalescing identical edits, but the merge may sometimes be +// confused by nearby changes. +// +// Even when merging succeeds, there is no guarantee that the +// composition of the two fixes is semantically correct. Coalescing +// identical edits is appropriate for imports, but not for, say, +// increments to a counter variable; the correct resolution in that +// case might be to increment it twice. Or consider two fixes that +// each delete the penultimate reference to an import or local +// variable: each fix is sound individually, and they may be textually +// distant from each other, but when both are applied, the program is +// no longer valid because it has an unreferenced import or local +// variable. +// TODO(adonovan): investigate replacing the final "gofmt" step with a +// formatter that applies the unused-import deletion logic of +// "goimports". +// +// Merging depends on both the order of fixes and they order of edits +// within them. For example, if three fixes add import "a" twice and +// import "b" once, the two imports of "a" may be combined if they +// appear in order [a, a, b], or not if they appear as [a, b, a]. +// TODO(adonovan): investigate an algebraic approach to imports; +// that is, for fixes to Go source files, convert changes within the +// import(...) portion of the file into semantic edits, compose those +// edits algebraically, then convert the result back to edits. +// +// applyFixes returns success if all fixes are valid, could be cleanly +// merged, and the corresponding files were successfully updated. +// +// If showDiff, instead of updating the files it display the final +// patch composed of all the cleanly merged fixes. +// +// TODO(adonovan): handle file-system level aliases such as symbolic +// links using robustio.FileID. +func applyFixes(actions []*checker.Action, showDiff bool) error { + + // Select fixes to apply. + // + // If there are several for a given Diagnostic, choose the first. + // Preserve the order of iteration, for determinism. 
+ type fixact struct { + fix *analysis.SuggestedFix + act *checker.Action + } + var fixes []*fixact for _, act := range actions { - editsForTokenFile := make(map[*token.File][]diff.Edit) for _, diag := range act.Diagnostics { - for _, sf := range diag.SuggestedFixes { - for _, edit := range sf.TextEdits { - // Validate the edit. - // Any error here indicates a bug in the analyzer. - start, end := edit.Pos, edit.End - file := act.Package.Fset.File(start) - if file == nil { - return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)", - act.Analyzer.Name, edit.Pos) - } - if !end.IsValid() { - end = start - } - if start > end { - return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)", - act.Analyzer.Name, edit.Pos, edit.End) - } - if eof := token.Pos(file.Base() + file.Size()); end > eof { - return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)", - act.Analyzer.Name, edit.End, eof) - } - edit := diff.Edit{ - Start: file.Offset(start), - End: file.Offset(end), - New: string(edit.NewText), - } - editsForTokenFile[file] = append(editsForTokenFile[file], edit) + for i := range diag.SuggestedFixes { + fix := &diag.SuggestedFixes[i] + if i == 0 { + fixes = append(fixes, &fixact{fix, act}) + } else { + // TODO(adonovan): abstract the logger. + log.Printf("%s: ignoring alternative fix %q", act, fix.Message) } } } + } - for f, edits := range editsForTokenFile { - id, _, err := robustio.GetFileID(f.Name()) + // Read file content on demand, from the virtual + // file system that fed the analyzer (see #62292). + // + // This cache assumes that all successful reads for the same + // file name return the same content. + // (It is tempting to group fixes by package and do the + // merge/apply/format steps one package at a time, but + // packages are not disjoint, due to test variants, so this + // would not really address the issue.) + baselineContent := make(map[string][]byte) + getBaseline := func(readFile analysisinternal.ReadFileFunc, filename string) ([]byte, error) { + content, ok := baselineContent[filename] + if !ok { + var err error + content, err = readFile(filename) if err != nil { - return err - } - if _, hasId := paths[id]; !hasId { - paths[id] = f.Name() - editsByAction[id] = make(map[*checker.Action][]diff.Edit) + return nil, err } - editsByAction[id][act] = edits + baselineContent[filename] = content } + return content, nil } - // Validate and group the edits to each actual file. - editsByPath := make(map[string][]diff.Edit) - for id, actToEdits := range editsByAction { - path := paths[id] - actions := make([]*checker.Action, 0, len(actToEdits)) - for act := range actToEdits { - actions = append(actions, act) - } + // Apply each fix, updating the current state + // only if the entire fix can be cleanly merged. + accumulatedEdits := make(map[string][]diff.Edit) + goodFixes := 0 +fixloop: + for _, fixact := range fixes { + readFile := internal.Pass(fixact.act).ReadFile + + // Convert analysis.TextEdits to diff.Edits, grouped by file. + // Precondition: a prior call to validateFix succeeded. + fileEdits := make(map[string][]diff.Edit) + fset := fixact.act.Package.Fset + for _, edit := range fixact.fix.TextEdits { + file := fset.File(edit.Pos) + + baseline, err := getBaseline(readFile, file.Name()) + if err != nil { + log.Printf("skipping fix to file %s: %v", file.Name(), err) + continue fixloop + } - // Does any action create conflicting edits? 
- for _, act := range actions { - edits := actToEdits[act] - if _, invalid := validateEdits(edits); invalid > 0 { - name, x, y := act.Analyzer.Name, edits[invalid-1], edits[invalid] - return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) + // We choose to treat size mismatch as a serious error, + // as it indicates a concurrent write to at least one file, + // and possibly others (consider a git checkout, for example). + if file.Size() != len(baseline) { + return fmt.Errorf("concurrent file modification detected in file %s (size changed from %d -> %d bytes); aborting fix", + file.Name(), file.Size(), len(baseline)) } + + fileEdits[file.Name()] = append(fileEdits[file.Name()], diff.Edit{ + Start: file.Offset(edit.Pos), + End: file.Offset(edit.End), + New: string(edit.NewText), + }) } - // Does any pair of different actions create edits that conflict? - for j := range actions { - for k := range actions[:j] { - x, y := actions[j], actions[k] - if x.Analyzer.Name > y.Analyzer.Name { - x, y = y, x - } - xedits, yedits := actToEdits[x], actToEdits[y] - combined := append(xedits, yedits...) - if _, invalid := validateEdits(combined); invalid > 0 { - // TODO: consider applying each action's consistent list of edits entirely, - // and then using a three-way merge (such as GNU diff3) on the resulting - // files to report more precisely the parts that actually conflict. - return diff3Conflict(path, x.Analyzer.Name, y.Analyzer.Name, xedits, yedits) + // Apply each set of edits by merging atop + // the previous accumulated state. + after := make(map[string][]diff.Edit) + for file, edits := range fileEdits { + if prev := accumulatedEdits[file]; len(prev) > 0 { + merged, ok := diff.Merge(prev, edits) + if !ok { + // debugging + if false { + log.Printf("%s: fix %s conflicts", fixact.act, fixact.fix.Message) + } + continue fixloop // conflict } + edits = merged } + after[file] = edits } - var edits []diff.Edit - for act := range actToEdits { - edits = append(edits, actToEdits[act]...) + // The entire fix applied cleanly; commit it. + goodFixes++ + maps.Copy(accumulatedEdits, after) + // debugging + if false { + log.Printf("%s: fix %s applied", fixact.act, fixact.fix.Message) } - editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. } + badFixes := len(fixes) - goodFixes - // Now we've got a set of valid edits for each file. Apply them. - // TODO(adonovan): don't abort the operation partway just because one file fails. - for path, edits := range editsByPath { - // TODO(adonovan): this should really work on the same - // gulp from the file system that fed the analyzer (see #62292). - contents, err := os.ReadFile(path) - if err != nil { - return err + // Show diff or update files to final state. + var files []string + for file := range accumulatedEdits { + files = append(files, file) + } + sort.Strings(files) // for deterministic -diff + var filesUpdated, totalFiles int + for _, file := range files { + edits := accumulatedEdits[file] + if len(edits) == 0 { + continue // the diffs annihilated (a miracle?) } - out, err := diff.ApplyBytes(contents, edits) + // Apply accumulated fixes. + baseline := baselineContent[file] // (cache hit) + final, err := diff.ApplyBytes(baseline, edits) if err != nil { - return err - } - - // Try to format the file. 
- if formatted, err := format.Source(out); err == nil { - out = formatted + log.Fatalf("internal error in diff.ApplyBytes: %v", err) } - if err := os.WriteFile(path, out, 0644); err != nil { - return err + // Attempt to format each file. + if formatted, err := format.Source(final); err == nil { + final = formatted } - } - return nil -} -// validateEdits returns a list of edits that is sorted and -// contains no duplicate edits. Returns the index of some -// overlapping adjacent edits if there is one and <0 if the -// edits are valid. -func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { - if len(edits) == 0 { - return nil, -1 - } - equivalent := func(x, y diff.Edit) bool { - return x.Start == y.Start && x.End == y.End && x.New == y.New - } - diff.SortEdits(edits) - unique := []diff.Edit{edits[0]} - invalid := -1 - for i := 1; i < len(edits); i++ { - prev, cur := edits[i-1], edits[i] - // We skip over equivalent edits without considering them - // an error. This handles identical edits coming from the - // multiple ways of loading a package into a - // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". - if !equivalent(prev, cur) { - unique = append(unique, cur) - if prev.End > cur.Start { - invalid = i + if showDiff { + // Since we formatted the file, we need to recompute the diff. + unified := diff.Unified(file+" (old)", file+" (new)", string(baseline), string(final)) + // TODO(adonovan): abstract the I/O. + os.Stdout.WriteString(unified) + + } else { + // write + totalFiles++ + // TODO(adonovan): abstract the I/O. + if err := os.WriteFile(file, final, 0644); err != nil { + log.Println(err) + continue } + filesUpdated++ } } - return unique, invalid -} - -// diff3Conflict returns an error describing two conflicting sets of -// edits on a file at path. -func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { - contents, err := ioutil.ReadFile(path) - if err != nil { - return err - } - oldlabel, old := "base", string(contents) - xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines) - if err != nil { - return err - } - ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines) - if err != nil { - return err + // TODO(adonovan): consider returning a structured result that + // maps each SuggestedFix to its status: + // - invalid + // - secondary, not selected + // - applied + // - had conflicts. + // and a mapping from each affected file to: + // - its final/original content pair, and + // - whether formatting was successful. + // Then file writes and the UI can be applied by the caller + // in whatever form they like. + + // If victory was incomplete, report an error that indicates partial progress. + // + // badFixes > 0 indicates that we decided not to attempt some + // fixes due to conflicts or failure to read the source; still + // it's a relatively benign situation since the user can + // re-run the tool, and we may still make progress. + // + // filesUpdated < totalFiles indicates that some file updates + // failed. This should be rare, but is a serious error as it + // may apply half a fix, or leave the files in a bad state. + // + // These numbers are potentially misleading: + // The denominator includes duplicate conflicting fixes due to + // common files in packages "p" and "p [p.test]", which may + // have been fixed fixed and won't appear in the re-run. + // TODO(adonovan): eliminate identical fixes as an initial + // filtering step. 
+ // + // TODO(adonovan): should we log that n files were updated in case of total victory? + if badFixes > 0 || filesUpdated < totalFiles { + if showDiff { + return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes)) + } else { + return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)", + goodFixes, len(fixes), filesUpdated) + } } - return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", - xlabel, ylabel, path, xdiff, ydiff) + return nil } // needFacts reports whether any analysis required by the specified set diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 1a9b309..82c3db6 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -367,17 +367,26 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re } pass := &analysis.Pass{ - Analyzer: a, - Fset: fset, - Files: files, - OtherFiles: cfg.NonGoFiles, - IgnoredFiles: cfg.IgnoredFiles, - Pkg: pkg, - TypesInfo: info, - TypesSizes: tc.Sizes, - TypeErrors: nil, // unitchecker doesn't RunDespiteErrors - ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + Analyzer: a, + Fset: fset, + Files: files, + OtherFiles: cfg.NonGoFiles, + IgnoredFiles: cfg.IgnoredFiles, + Pkg: pkg, + TypesInfo: info, + TypesSizes: tc.Sizes, + TypeErrors: nil, // unitchecker doesn't RunDespiteErrors + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { + // Unitchecker doesn't apply fixes, but it does report them in the JSON output. + if err := analysisinternal.ValidateFixes(fset, a, d.SuggestedFixes); err != nil { + // Since we have diagnostics, the exit code will be nonzero, + // so logging these errors is sufficient. + log.Println(err) + d.SuggestedFixes = nil + } + act.diagnostics = append(act.diagnostics, d) + }, ImportObjectFact: facts.ImportObjectFact, ExportObjectFact: facts.ExportObjectFact, AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) }, @@ -386,7 +395,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, Module: module, } - pass.ReadFile = analysisinternal.MakeReadFile(pass) + pass.ReadFile = analysisinternal.CheckedReadFile(pass, os.ReadFile) t0 := time.Now() act.result, act.err = a.Run(pass) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index cfda893..0d5050f 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -37,6 +37,8 @@ package inspector import ( "go/ast" _ "unsafe" + + "golang.org/x/tools/internal/astutil/edge" ) // An Inspector provides methods for inspecting @@ -48,6 +50,21 @@ type Inspector struct { //go:linkname events func events(in *Inspector) []event { return in.events } +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. 
+// +//go:linkname unpackEdgeKindAndIndex +func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -59,7 +76,7 @@ type event struct { node ast.Node typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events index int32 // index of corresponding push or pop event - parent int32 // index of parent's push node (defined for push nodes only) + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -194,49 +211,74 @@ func traverse(files []*ast.File) []event { extent += int(f.End() - f.Pos()) } // This estimate is based on the net/http package. - capacity := extent * 33 / 100 - if capacity > 1e6 { - capacity = 1e6 // impose some reasonable maximum + capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent } - events := make([]event, 0, capacity) + for _, file := range files { + walk(v, edge.Invalid, -1, file) + } + return v.events +} - var stack []event - stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent - for _, f := range files { - ast.Inspect(f, func(n ast.Node) bool { - if n != nil { - // push - ev := event{ - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: int32(len(events)), // push event temporarily holds own index - parent: stack[len(stack)-1].index, - } - stack = append(stack, ev) - events = append(events, ev) +type visitor struct { + events []event + stack []item +} - // 2B nodes ought to be enough for anyone! - if int32(len(events)) < 0 { - panic("event index exceeded int32") - } - } else { - // pop - top := len(stack) - 1 - ev := stack[top] - typ := typeOf(ev.node) - push := ev.index - parent := top - 1 - - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. - events[push].index = int32(len(events)) // make push refer to pop - - stack = stack[:top] - events = append(events, ev) - } - return true - }) +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendents + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! 
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] - return events + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 40b1bfd..9778448 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -219,7 +219,7 @@ func typeOf(n ast.Node) uint64 { //go:linkname maskOf func maskOf(nodes []ast.Node) uint64 { - if nodes == nil { + if len(nodes) == 0 { return math.MaxUint64 // match all node types } var mask uint64 diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 0000000..5a42174 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. 
+ +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/internal/astutil/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + if n.Max != nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + 
+ case *ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, edge.AssignStmt_Rhs, n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + walk(v, edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, 
edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0147d90..c3a59b8 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -59,10 +59,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 type LoadMode int const ( diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 93b3090..4326114 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -257,10 +257,13 @@ func (h hasher) hash(t types.Type) uint32 { } tparams := t.TypeParams() - for i := range tparams.Len() { - h.inGenericSig = true - tparam := tparams.At(i) - hash += 7 * h.hash(tparam.Constraint()) + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results + + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 5861523..abf7081 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -8,15 +8,19 @@ package analysisinternal import ( "bytes" + "cmp" "fmt" "go/ast" + "go/printer" "go/scanner" "go/token" "go/types" - "os" pathpkg "path" + "slices" + "strings" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/typesinternal" ) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { @@ -174,20 +178,25 @@ func equivalentTypes(want, got types.Type) bool { return types.AssignableTo(want, got) } -// MakeReadFile returns a simple implementation of the Pass.ReadFile function. -func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { +// A ReadFileFunc is a function that returns the +// contents of a file, such as [os.ReadFile]. +type ReadFileFunc = func(filename string) ([]byte, error) + +// CheckedReadFile returns a wrapper around a Pass.ReadFile +// function that performs the appropriate checks. 
+func CheckedReadFile(pass *analysis.Pass, readFile ReadFileFunc) ReadFileFunc { return func(filename string) ([]byte, error) { if err := CheckReadable(pass, filename); err != nil { return nil, err } - return os.ReadFile(filename) + return readFile(filename) } } // CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. func CheckReadable(pass *analysis.Pass, filename string) error { - if slicesContains(pass.OtherFiles, filename) || - slicesContains(pass.IgnoredFiles, filename) { + if slices.Contains(pass.OtherFiles, filename) || + slices.Contains(pass.IgnoredFiles, filename) { return nil } for _, f := range pass.Files { @@ -198,24 +207,21 @@ func CheckReadable(pass *analysis.Pass, filename string) error { return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) } -// TODO(adonovan): use go1.21 slices.Contains. -func slicesContains[S ~[]E, E comparable](slice S, x E) bool { - for _, elem := range slice { - if elem == x { - return true - } - } - return false -} - // AddImport checks whether this file already imports pkgpath and // that import is in scope at pos. If so, it returns the name under // which it was imported and a zero edit. Otherwise, it adds a new // import of pkgpath, using a name derived from the preferred name, -// and returns the chosen name along with the edit for the new import. +// and returns the chosen name, a prefix to be concatenated with member +// to form a qualified name, and the edit for the new import. +// +// In the special case that pkgpath is dot-imported then member, the +// identifer for which the import is being added, is consulted. If +// member is not shadowed at pos, AddImport returns (".", "", nil). +// (AddImport accepts the caller's implicit claim that the imported +// package declares member.) // // It does not mutate its arguments. -func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferredName string) (name string, newImport []analysis.TextEdit) { +func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) { // Find innermost enclosing lexical block. scope := info.Scopes[file].Innermost(pos) if scope == nil { @@ -225,10 +231,16 @@ func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferr // Is there an existing import of this package? // If so, are we in its scope? (not shadowed) for _, spec := range file.Imports { - pkgname, ok := importedPkgName(info, spec) - if ok && pkgname.Imported().Path() == pkgpath { - if _, obj := scope.LookupParent(pkgname.Name(), pos); obj == pkgname { - return pkgname.Name(), nil + pkgname := info.PkgNameOf(spec) + if pkgname != nil && pkgname.Imported().Path() == pkgpath { + name = pkgname.Name() + if name == "." { + // The scope of ident must be the file scope. + if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { + return name, "", nil + } + } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { + return name, name + ".", nil } } } @@ -266,22 +278,174 @@ func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferr before = decl0.Doc } } - return newName, []analysis.TextEdit{{ + return newName, newName + ".", []analysis.TextEdit{{ Pos: before.Pos(), End: before.Pos(), NewText: []byte(newText), }} } -// importedPkgName returns the PkgName object declared by an ImportSpec. -// TODO(adonovan): use go1.22's Info.PkgNameOf. 
-func importedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { - var obj types.Object - if imp.Name != nil { - obj = info.Defs[imp.Name] - } else { - obj = info.Implicits[imp] +// Format returns a string representation of the expression e. +func Format(fset *token.FileSet, e ast.Expr) string { + var buf strings.Builder + printer.Fprint(&buf, fset, e) // ignore errors + return buf.String() +} + +// Imports returns true if path is imported by pkg. +func Imports(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} + +// IsTypeNamed reports whether t is (or is an alias for) a +// package-level defined type with the given package path and one of +// the given names. It returns false if t is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { + if named, ok := types.Unalias(t).(*types.Named); ok { + tname := named.Obj() + return tname != nil && + typesinternal.IsPackageLevel(tname) && + tname.Pkg().Path() == pkgPath && + slices.Contains(names, tname.Name()) + } + return false +} + +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a +// package-level defined type with the given package path and one of the given +// names. It returns false if t is not a pointer type. +func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { + r := typesinternal.Unpointer(t) + if r == t { + return false + } + return IsTypeNamed(r, pkgPath, names...) +} + +// IsFunctionNamed reports whether obj is a package-level function +// defined in the given package and has one of the given names. +// It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { + f, ok := obj.(*types.Func) + return ok && + typesinternal.IsPackageLevel(obj) && + f.Pkg().Path() == pkgPath && + f.Type().(*types.Signature).Recv() == nil && + slices.Contains(names, f.Name()) +} + +// IsMethodNamed reports whether obj is a method defined on a +// package-level type with the given package and type name, and has +// one of the given names. It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.TypeName.Name", +// which is important for the performance of syntax matching. +func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { + if fn, ok := obj.(*types.Func); ok { + if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + _, T := typesinternal.ReceiverNamed(recv) + return T != nil && + IsTypeNamed(T, pkgPath, typeName) && + slices.Contains(names, fn.Name()) + } } - pkgname, ok := obj.(*types.PkgName) - return pkgname, ok + return false +} + +// ValidateFixes validates the set of fixes for a single diagnostic. +// Any error indicates a bug in the originating analyzer. +// +// It updates fixes so that fixes[*].End.IsValid(). +// +// It may be used as part of an analysis driver implementation. 
+func ValidateFixes(fset *token.FileSet, a *analysis.Analyzer, fixes []analysis.SuggestedFix) error { + fixMessages := make(map[string]bool) + for i := range fixes { + fix := &fixes[i] + if fixMessages[fix.Message] { + return fmt.Errorf("analyzer %q suggests two fixes with same Message (%s)", a.Name, fix.Message) + } + fixMessages[fix.Message] = true + if err := validateFix(fset, fix); err != nil { + return fmt.Errorf("analyzer %q suggests invalid fix (%s): %v", a.Name, fix.Message, err) + } + } + return nil +} + +// validateFix validates a single fix. +// Any error indicates a bug in the originating analyzer. +// +// It updates fix so that fix.End.IsValid(). +func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { + + // Stably sort edits by Pos. This ordering puts insertions + // (end = start) before deletions (end > start) at the same + // point, but uses a stable sort to preserve the order of + // multiple insertions at the same point. + slices.SortStableFunc(fix.TextEdits, func(x, y analysis.TextEdit) int { + if sign := cmp.Compare(x.Pos, y.Pos); sign != 0 { + return sign + } + return cmp.Compare(x.End, y.End) + }) + + var prev *analysis.TextEdit + for i := range fix.TextEdits { + edit := &fix.TextEdits[i] + + // Validate edit individually. + start := edit.Pos + file := fset.File(start) + if file == nil { + return fmt.Errorf("missing file info for pos (%v)", edit.Pos) + } + if end := edit.End; end.IsValid() { + if end < start { + return fmt.Errorf("pos (%v) > end (%v)", edit.Pos, edit.End) + } + endFile := fset.File(end) + if endFile == nil { + return fmt.Errorf("malformed end position %v", end) + } + if endFile != file { + return fmt.Errorf("edit spans files %v and %v", file.Name(), endFile.Name()) + } + } else { + edit.End = start // update the SuggestedFix + } + if eof := token.Pos(file.Base() + file.Size()); edit.End > eof { + return fmt.Errorf("end is (%v) beyond end of file (%v)", edit.End, eof) + } + + // Validate the sequence of edits: + // properly ordered, no overlapping deletions + if prev != nil && edit.Pos < prev.End { + xpos := fset.Position(prev.Pos) + xend := fset.Position(prev.End) + ypos := fset.Position(edit.Pos) + yend := fset.Position(edit.End) + return fmt.Errorf("overlapping edits to %s (%d:%d-%d:%d and %d:%d-%d:%d)", + xpos.Filename, + xpos.Line, xpos.Column, + xend.Line, xend.Column, + ypos.Line, ypos.Column, + yend.Line, yend.Column, + ) + } + prev = edit + } + + return nil } diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go new file mode 100644 index 0000000..4f6ccfd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. 
+func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. +func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. + // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) 
+var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), + ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + 
LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/golang.org/x/tools/internal/diff/merge.go b/vendor/golang.org/x/tools/internal/diff/merge.go new file mode 100644 index 0000000..eeae98a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/merge.go @@ -0,0 +1,81 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "slices" +) + +// Merge merges two valid, ordered lists of edits. +// It returns zero if there was a conflict. +// +// If corresponding edits in x and y are identical, +// they are coalesced in the result. +// +// If x and y both provide different insertions at the same point, +// the insertions from x will be first in the result. +// +// TODO(adonovan): this algorithm could be improved, for example by +// working harder to coalesce non-identical edits that share a common +// deletion or common prefix of insertion (see the tests). +// Survey the academic literature for insights. +func Merge(x, y []Edit) ([]Edit, bool) { + // Make a defensive (premature) copy of the arrays. + x = slices.Clone(x) + y = slices.Clone(y) + + var merged []Edit + add := func(edit Edit) { + merged = append(merged, edit) + } + var xi, yi int + for xi < len(x) && yi < len(y) { + px := &x[xi] + py := &y[yi] + + if *px == *py { + // x and y are identical: coalesce. + add(*px) + xi++ + yi++ + + } else if px.End <= py.Start { + // x is entirely before y, + // or an insertion at start of y. + add(*px) + xi++ + + } else if py.End <= px.Start { + // y is entirely before x, + // or an insertion at start of x. 
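The edge package completed above gives every Node-valued field of an ast.Node struct a stable identifier, and Kind.Get retrieves the child behind that field. The package is internal to x/tools, so the sketch below would only compile inside that module; the sample source and variable names are assumptions for illustration.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"

	"golang.org/x/tools/internal/astutil/edge" // internal to x/tools; illustration only
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p\nvar _ = f(a, b)\n", 0)
	if err != nil {
		log.Fatal(err)
	}
	call := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0].(*ast.CallExpr)

	fmt.Println(edge.CallExpr_Fun.Get(call, -1)) // non-slice field: idx is -1; prints f
	fmt.Println(edge.CallExpr_Args.Get(call, 1)) // slice field: idx selects an element; prints b
	fmt.Println(edge.CallExpr_Fun)               // String(): "CallExpr.Fun"
}
```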
+ add(*py) + yi++ + + } else if px.Start < py.Start { + // x is partly before y: + // split it into a deletion and an edit. + add(Edit{px.Start, py.Start, ""}) + px.Start = py.Start + + } else if py.Start < px.Start { + // y is partly before x: + // split it into a deletion and an edit. + add(Edit{py.Start, px.Start, ""}) + py.Start = px.Start + + } else { + // x and y are unequal non-insertions + // at the same point: conflict. + return nil, false + } + } + for ; xi < len(x); xi++ { + add(x[xi]) + } + for ; yi < len(y); yi++ { + add(y[yi]) + } + return merged, true +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 69b1d69..1294392 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -671,7 +671,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 6cdab44..522287d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -14,6 +14,7 @@ import ( "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -572,6 +573,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { sig := fn.Type().(*types.Signature) recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) } @@ -619,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjVar: pos := r.pos() typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) } } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index e333efc..7ea9013 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -28,7 +28,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// An Runner will run go command invocations and serialize +// A Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { // once guards the runner initialization. @@ -179,7 +179,7 @@ type Invocation struct { CleanEnv bool Env []string WorkingDir string - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) } // Postcondition: both error results have same nilness. @@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(startTime, cmd) + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. 
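Merge, completed just above, coalesces identical edits from the two lists and reports a conflict when two different non-insertion edits land at the same point. A small sketch of both behaviours, under the assumption that the internal diff package can be imported (real clients outside x/tools cannot):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/internal/diff" // internal to x/tools; illustration only
)

func main() {
	src := "abcdef"
	x := []diff.Edit{{Start: 0, End: 1, New: "A"}}
	y := []diff.Edit{
		{Start: 0, End: 1, New: "A"}, // identical to x's edit: coalesced
		{Start: 3, End: 4, New: "D"},
	}
	if merged, ok := diff.Merge(x, y); ok {
		out, err := diff.Apply(src, merged)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(out) // AbcDef
	}

	// Two different non-insertion edits at the same offset conflict.
	_, ok := diff.Merge(
		[]diff.Edit{{Start: 0, End: 1, New: "X"}},
		[]diff.Edit{{Start: 0, End: 1, New: "Y"}},
	)
	fmt.Println(ok) // false
}
```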
+ handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. -See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -438,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -452,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. + if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) } } - panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) } func cmdDebugStr(cmd *exec.Cmd) string { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 0000000..469c648 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. 
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 0000000..169d37c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 66e69b4..7846059 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,7 +5,7 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -var GetDepsErrors = func(p interface{}) []*PackageError { return nil } +var GetDepsErrors = func(p any) []*PackageError { return nil } type PackageError struct { ImportStack []string // shortest path from package named on command line to this one @@ -16,5 +16,5 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var SetModFlag = func(config interface{}, value string) {} +var SetModFlag = func(config any, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go deleted file mode 100644 index 949f278..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package robustio - -import "syscall" - -// The robustio package is copied from cmd/go/internal/robustio, a package used -// by the go command to retry known flaky operations on certain operating systems. - -//go:generate go run copyfiles.go - -// Since the gopls module cannot access internal/syscall/windows, copy a -// necessary constant. -const ERROR_SHARING_VIOLATION syscall.Errno = 32 diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio.go b/vendor/golang.org/x/tools/internal/robustio/robustio.go deleted file mode 100644 index 0a559fc..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package robustio wraps I/O functions that are prone to failure on Windows, -// transparently retrying errors up to an arbitrary timeout. -// -// Errors are classified heuristically and retries are bounded, so the functions -// in this package do not completely eliminate spurious errors. However, they do -// significantly reduce the rate of failure in practice. 
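The invoke.go and invoke_unix/invoke_notunix changes above pick a per-OS "stuck process" signal: SIGQUIT where available, so a wedged go subprocess dumps its goroutine stacks before dying, and os.Kill elsewhere. A rough standalone sketch of that pattern, Unix only, with the command and timeouts invented for illustration:

```go
package main

import (
	"bytes"
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	var stderr bytes.Buffer
	cmd := exec.Command("go", "env") // placeholder command
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		log.Printf("finished: %v", err)
	case <-time.After(30 * time.Second):
		// SIGQUIT makes the Go runtime in the child print all goroutine
		// stacks to stderr, which is far more useful than a bare kill.
		_ = cmd.Process.Signal(syscall.SIGQUIT)
		select {
		case <-done: // stacks have been flushed to stderr
		case <-time.After(5 * time.Second):
		}
		log.Fatalf("hanging subprocess; stderr:\n%s", stderr.String())
	}
}
```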
-// -// If so, the error will likely wrap one of: -// The functions in this package do not completely eliminate spurious errors, -// but substantially reduce their rate of occurrence in practice. -package robustio - -import "time" - -// Rename is like os.Rename, but on Windows retries errors that may occur if the -// file is concurrently read or overwritten. -// -// (See golang.org/issue/31247 and golang.org/issue/32188.) -func Rename(oldpath, newpath string) error { - return rename(oldpath, newpath) -} - -// ReadFile is like os.ReadFile, but on Windows retries errors that may -// occur if the file is concurrently replaced. -// -// (See golang.org/issue/31247 and golang.org/issue/32188.) -func ReadFile(filename string) ([]byte, error) { - return readFile(filename) -} - -// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur -// if an executable file in the directory has recently been executed. -// -// (See golang.org/issue/19491.) -func RemoveAll(path string) error { - return removeAll(path) -} - -// IsEphemeralError reports whether err is one of the errors that the functions -// in this package attempt to mitigate. -// -// Errors considered ephemeral include: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION -// -// This set may be expanded in the future; programs must not rely on the -// non-ephemerality of any given error. -func IsEphemeralError(err error) bool { - return isEphemeralError(err) -} - -// A FileID uniquely identifies a file in the file system. -// -// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file -// names denote the same file. -// A FileID is comparable, and thus suitable for use as a map key. -type FileID struct { - device, inode uint64 -} - -// GetFileID returns the file system's identifier for the file, and its -// modification time. -// Like os.Stat, it reads through symbolic links. -func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) } diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go deleted file mode 100644 index 99fd8eb..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package robustio - -import ( - "errors" - "syscall" -) - -const errFileNotFound = syscall.ENOENT - -// isEphemeralError returns true if err may be resolved by waiting. -func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == errFileNotFound - } - return false -} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go deleted file mode 100644 index d5c2418..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build windows || darwin -// +build windows darwin - -package robustio - -import ( - "errors" - "math/rand" - "os" - "syscall" - "time" -) - -const arbitraryTimeout = 2000 * time.Millisecond - -// retry retries ephemeral errors from f up to an arbitrary timeout -// to work around filesystem flakiness on Windows and Darwin. -func retry(f func() (err error, mayRetry bool)) error { - var ( - bestErr error - lowestErrno syscall.Errno - start time.Time - nextSleep time.Duration = 1 * time.Millisecond - ) - for { - err, mayRetry := f() - if err == nil || !mayRetry { - return err - } - - var errno syscall.Errno - if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { - bestErr = err - lowestErrno = errno - } else if bestErr == nil { - bestErr = err - } - - if start.IsZero() { - start = time.Now() - } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { - break - } - time.Sleep(nextSleep) - nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) - } - - return bestErr -} - -// rename is like os.Rename, but retries ephemeral errors. -// -// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with -// MOVEFILE_REPLACE_EXISTING. -// -// Windows also provides a different system call, ReplaceFile, -// that provides similar semantics, but perhaps preserves more metadata. (The -// documentation on the differences between the two is very sparse.) -// -// Empirical error rates with MoveFileEx are lower under modest concurrency, so -// for now we're sticking with what the os package already provides. -func rename(oldpath, newpath string) (err error) { - return retry(func() (err error, mayRetry bool) { - err = os.Rename(oldpath, newpath) - return err, isEphemeralError(err) - }) -} - -// readFile is like os.ReadFile, but retries ephemeral errors. -func readFile(filename string) ([]byte, error) { - var b []byte - err := retry(func() (err error, mayRetry bool) { - b, err = os.ReadFile(filename) - - // Unlike in rename, we do not retry errFileNotFound here: it can occur - // as a spurious error, but the file may also genuinely not exist, so the - // increase in robustness is probably not worth the extra latency. - return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) - }) - return b, err -} - -func removeAll(path string) error { - return retry(func() (err error, mayRetry bool) { - err = os.RemoveAll(path) - return err, isEphemeralError(err) - }) -} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go deleted file mode 100644 index 3a20cac..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !windows && !darwin -// +build !windows,!darwin - -package robustio - -import ( - "os" -) - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func readFile(filename string) ([]byte, error) { - return os.ReadFile(filename) -} - -func removeAll(path string) error { - return os.RemoveAll(path) -} - -func isEphemeralError(err error) bool { - return false -} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go deleted file mode 100644 index 9fa4cac..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 -// +build plan9 - -package robustio - -import ( - "os" - "syscall" - "time" -) - -func getFileID(filename string) (FileID, time.Time, error) { - fi, err := os.Stat(filename) - if err != nil { - return FileID{}, time.Time{}, err - } - dir := fi.Sys().(*syscall.Dir) - return FileID{ - device: uint64(dir.Type)<<32 | uint64(dir.Dev), - inode: dir.Qid.Path, - }, fi.ModTime(), nil -} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go deleted file mode 100644 index cf74865..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows && !plan9 -// +build !windows,!plan9 - -package robustio - -import ( - "os" - "syscall" - "time" -) - -func getFileID(filename string) (FileID, time.Time, error) { - fi, err := os.Stat(filename) - if err != nil { - return FileID{}, time.Time{}, err - } - stat := fi.Sys().(*syscall.Stat_t) - return FileID{ - device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux) - inode: stat.Ino, - }, fi.ModTime(), nil -} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go deleted file mode 100644 index 616c328..0000000 --- a/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package robustio - -import ( - "errors" - "syscall" - "time" -) - -const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND - -// isEphemeralError returns true if err may be resolved by waiting. -func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - switch errno { - case syscall.ERROR_ACCESS_DENIED, - syscall.ERROR_FILE_NOT_FOUND, - ERROR_SHARING_VIOLATION: - return true - } - } - return false -} - -// Note: it may be convenient to have this helper return fs.FileInfo, but -// implementing this is actually quite involved on Windows. Since we only -// currently use mtime, keep it simple. 
-func getFileID(filename string) (FileID, time.Time, error) { - filename16, err := syscall.UTF16PtrFromString(filename) - if err != nil { - return FileID{}, time.Time{}, err - } - h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0) - if err != nil { - return FileID{}, time.Time{}, err - } - defer syscall.CloseHandle(h) - var i syscall.ByHandleFileInformation - if err := syscall.GetFileInformationByHandle(h, &i); err != nil { - return FileID{}, time.Time{}, err - } - mtime := time.Unix(0, i.LastWriteTime.Nanoseconds()) - return FileID{ - device: uint64(i.VolumeSerialNumber), - inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow), - }, mtime, nil -} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv.go b/vendor/golang.org/x/tools/internal/testenv/testenv.go index d217e28..144f4f8 100644 --- a/vendor/golang.org/x/tools/internal/testenv/testenv.go +++ b/vendor/golang.org/x/tools/internal/testenv/testenv.go @@ -7,10 +7,12 @@ package testenv import ( + "bufio" "bytes" "context" "fmt" "go/build" + "log" "os" "os/exec" "path/filepath" @@ -553,3 +555,45 @@ func NeedsGOROOTDir(t *testing.T, dir string) { } } } + +// RedirectStderr causes os.Stderr (and the global logger) to be +// temporarily replaced so that writes to it are sent to t.Log. +// It is restored at test cleanup. +func RedirectStderr(t testing.TB) { + t.Setenv("RedirectStderr", "") // side effect: assert t.Parallel wasn't called + + // TODO(adonovan): if https://go.dev/issue/59928 is accepted, + // simply set w = t.Output() and dispense with the pipe. + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("pipe: %v", err) + } + done := make(chan struct{}) + go func() { + for sc := bufio.NewScanner(r); sc.Scan(); { + t.Log(sc.Text()) + } + r.Close() + close(done) + }() + + // Also do the same for the global logger. + savedWriter, savedPrefix, savedFlags := log.Writer(), log.Prefix(), log.Flags() + log.SetPrefix("log: ") + log.SetOutput(w) + log.SetFlags(0) + + oldStderr := os.Stderr + os.Stderr = w + t.Cleanup(func() { + w.Close() // ignore error + os.Stderr = oldStderr + + log.SetOutput(savedWriter) + log.SetPrefix(savedPrefix) + log.SetFlags(savedFlags) + + // Don't let test finish before final t.Log. + <-done + }) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 6e83c6f..27a2b17 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -109,8 +109,13 @@ func CoreType(T types.Type) types.Type { // // NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. 
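RedirectStderr, added to testenv above, pipes both os.Stderr and the standard logger into t.Log for the duration of a test and restores them at cleanup. A hypothetical test using it (package and test names are made up; the helper is internal to x/tools):

```go
package foo_test

import (
	"fmt"
	"log"
	"os"
	"testing"

	"golang.org/x/tools/internal/testenv" // internal to x/tools; illustration only
)

func TestNoisyHelper(t *testing.T) {
	// Incompatible with t.Parallel: the helper calls t.Setenv internally.
	testenv.RedirectStderr(t)

	log.Print("captured by t.Log via the global logger")
	fmt.Fprintln(os.Stderr, "also captured by t.Log via os.Stderr")
}
```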
-func NormalTerms(typ types.Type) ([]*types.Term, error) { - switch typ := typ.Underlying().(type) { +func NormalTerms(T types.Type) ([]*types.Term, error) { + // typeSetOf(T) == typeSetOf(Unalias(T)) + typ := types.Unalias(T) + if named, ok := typ.(*types.Named); ok { + typ = named.Underlying() + } + switch typ := typ.(type) { case *types.TypeParam: return StructuralTerms(typ) case *types.Union: @@ -118,7 +123,7 @@ func NormalTerms(typ types.Type) ([]*types.Term, error) { case *types.Interface: return InterfaceTermSet(typ) default: - return []*types.Term{types.NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, T)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 131caab..235a6de 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -966,7 +966,7 @@ const ( // var _ = string(x) InvalidConversion - // InvalidUntypedConversion occurs when an there is no valid implicit + // InvalidUntypedConversion occurs when there is no valid implicit // conversion from an untyped value satisfying the type constraints of the // context in which it is used. // diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index e54accc..8352ea7 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -12,7 +12,8 @@ import ( // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. // -// The named result may be nil in ill-typed code. +// The named result may be nil if recv is from a method on an +// anonymous interface or struct types or in ill-typed code. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() if ptr, ok := types.Unalias(t).(*types.Pointer); ok { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index a93d51f..3453487 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -120,3 +120,8 @@ func Origin(t NamedOrAlias) NamedOrAlias { } return t } + +// IsPackageLevel reports whether obj is a package-level symbol. +func IsPackageLevel(obj types.Object) bool { + return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go new file mode 100644 index 0000000..e5da049 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of +// this API that actually does something. 
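The NormalTerms change above unaliases its argument and looks through Named types itself, so in the default case the returned term carries the original type T rather than T's underlying type. A sketch of the observable difference, assuming access to the internal typeparams package:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/internal/typeparams" // internal to x/tools; illustration only
)

func main() {
	const src = "package p\ntype Bytes []byte\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	bytesT := pkg.Scope().Lookup("Bytes").Type()
	terms, err := typeparams.NormalTerms(bytesT)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(terms[0].Type()) // now p.Bytes; previously []byte
}
```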
+ +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. +func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/modules.txt b/vendor/modules.txt index 38f6603..a7f3fd0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,10 +11,10 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/sync v0.10.0 +# golang.org/x/sync v0.11.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/tools v0.29.0 +# golang.org/x/tools v0.30.0 ## explicit; go 1.22.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/analysistest @@ -32,6 +32,7 @@ golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/astutil/edge golang.org/x/tools/internal/diff golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/event @@ -44,7 +45,6 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/goroot golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits -golang.org/x/tools/internal/robustio golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/testenv golang.org/x/tools/internal/typeparams
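The varkind.go stub above, together with the gcimporter changes earlier in this diff, threads a VarKind through variable construction even though GetVarKind/SetVarKind are no-ops until a future go/types release can carry the information. A minimal sketch of the call pattern (package path and variable names invented; the package is internal to x/tools):

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal" // internal to x/tools; illustration only
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	v := types.NewVar(token.NoPos, pkg, "x", types.Typ[types.Int])

	// Importers tag variables as they create them; today this is a no-op.
	typesinternal.SetVarKind(v, typesinternal.PackageVar)
	fmt.Println(typesinternal.GetVarKind(v)) // currently always VarKind(0)
}
```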