gopls/internal/lsp/cache: use persistent.Set in a couple places
Use the new persistent.Set type to track known dirs and unloadable
files.

Change-Id: I3e0d4bdc846f4c37a0046a01bf67d83bc06b9598
Reviewed-on: https://go-review.googlesource.com/c/tools/+/524762
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Alan Donovan <adonovan@google.com>
Auto-Submit: Robert Findley <rfindley@google.com>
findleyr authored and gopherbot committed Sep 1, 2023
1 parent 38b898b commit 21090a2
Showing 3 changed files with 19 additions and 66 deletions.
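
For orientation before the hunks: the sketch below collects the persistent.Set operations this change relies on, using only the calls and constructions visible in the diff (new, Add, AddAll, Contains, Remove, Clone, Destroy). It is an illustration, not code from the CL, and it assumes it sits inside package cache so the existing persistent and span imports apply.

// Illustration only (not part of the CL). Assumes package cache's existing
// imports of persistent and span; only methods that appear in the diff are used.
func exampleSetUsage(uris []span.URI) {
	s := new(persistent.Set[span.URI]) // a zero Set is empty and usable, as in createView below
	for _, uri := range uris {
		s.Add(uri) // replaces m[uri] = struct{}{}
	}
	for _, uri := range uris {
		if s.Contains(uri) { // replaces _, ok := m[uri]
			s.Remove(uri) // replaces delete(m, uri)
		}
	}
	clone := s.Clone() // replaces the manual copy loop deleted in snapshot.clone
	clone.AddAll(s)    // replaces knownDirsSet.SetAll
	clone.Destroy()    // these structures are released explicitly
	s.Destroy()
}
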
43 changes: 0 additions & 43 deletions gopls/internal/lsp/cache/maps.go
@@ -76,46 +76,3 @@ func (m filesMap) overlays() []*Overlay {
 	}
 	return overlays
 }
-
-type knownDirsSet struct {
-	impl *persistent.Map[span.URI, struct{}]
-}
-
-func newKnownDirsSet() knownDirsSet {
-	return knownDirsSet{
-		impl: new(persistent.Map[span.URI, struct{}]),
-	}
-}
-
-func (s knownDirsSet) Clone() knownDirsSet {
-	return knownDirsSet{
-		impl: s.impl.Clone(),
-	}
-}
-
-func (s knownDirsSet) Destroy() {
-	s.impl.Destroy()
-}
-
-func (s knownDirsSet) Contains(key span.URI) bool {
-	_, ok := s.impl.Get(key)
-	return ok
-}
-
-func (s knownDirsSet) Range(do func(key span.URI)) {
-	s.impl.Range(func(key span.URI, value struct{}) {
-		do(key)
-	})
-}
-
-func (s knownDirsSet) SetAll(other knownDirsSet) {
-	s.impl.SetAll(other.impl)
-}
-
-func (s knownDirsSet) Insert(key span.URI) {
-	s.impl.Set(key, struct{}{}, nil)
-}
-
-func (s knownDirsSet) Remove(key span.URI) {
-	s.impl.Delete(key)
-}
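
The deleted knownDirsSet above was a one-off set facade over persistent.Map[span.URI, struct{}]. Generalizing that facade over the key type gives roughly what a persistent.Set can offer in its place. The sketch below is only that generalization, not the real persistent.Set implementation; the cmp.Ordered key constraint (and the enclosing package's persistent import) is an assumption.

// Sketch only: the deleted wrapper, generalized over the key type. Method
// names follow the persistent.Set calls used elsewhere in this CL; the
// internals are guesses, not the library's actual code.
type sketchSet[K cmp.Ordered] struct {
	impl *persistent.Map[K, struct{}]
}

func newSketchSet[K cmp.Ordered]() sketchSet[K] {
	return sketchSet[K]{impl: new(persistent.Map[K, struct{}])}
}

func (s sketchSet[K]) Contains(key K) bool       { _, ok := s.impl.Get(key); return ok }
func (s sketchSet[K]) Add(key K)                 { s.impl.Set(key, struct{}{}, nil) }
func (s sketchSet[K]) Remove(key K)              { s.impl.Delete(key) }
func (s sketchSet[K]) Clone() sketchSet[K]       { return sketchSet[K]{impl: s.impl.Clone()} }
func (s sketchSet[K]) AddAll(other sketchSet[K]) { s.impl.SetAll(other.impl) }
func (s sketchSet[K]) Destroy()                  { s.impl.Destroy() }
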
12 changes: 6 additions & 6 deletions gopls/internal/lsp/cache/session.go
@@ -176,13 +176,13 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI,
 		activePackages: new(persistent.Map[PackageID, *Package]),
 		symbolizeHandles: new(persistent.Map[span.URI, *memoize.Promise]),
 		workspacePackages: make(map[PackageID]PackagePath),
-		unloadableFiles: make(map[span.URI]struct{}),
+		unloadableFiles: new(persistent.Set[span.URI]),
 		parseModHandles: new(persistent.Map[span.URI, *memoize.Promise]),
 		parseWorkHandles: new(persistent.Map[span.URI, *memoize.Promise]),
 		modTidyHandles: new(persistent.Map[span.URI, *memoize.Promise]),
 		modVulnHandles: new(persistent.Map[span.URI, *memoize.Promise]),
 		modWhyHandles: new(persistent.Map[span.URI, *memoize.Promise]),
-		knownSubdirs: newKnownDirsSet(),
+		knownSubdirs: new(persistent.Set[span.URI]),
 		workspaceModFiles: wsModFiles,
 		workspaceModFilesErr: wsModFilesErr,
 		pkgIndex: typerefs.NewPackageIndex(),
@@ -619,15 +619,15 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes
 // knownDirectories returns all of the directories known to the given
 // snapshots, including workspace directories and their subdirectories.
 // It is responsibility of the caller to destroy the returned set.
-func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet {
-	result := newKnownDirsSet()
+func knownDirectories(ctx context.Context, snapshots []*snapshot) *persistent.Set[span.URI] {
+	result := new(persistent.Set[span.URI])
 	for _, snapshot := range snapshots {
 		dirs := snapshot.dirs(ctx)
 		for _, dir := range dirs {
-			result.Insert(dir)
+			result.Add(dir)
 		}
 		knownSubdirs := snapshot.getKnownSubdirs(dirs)
-		result.SetAll(knownSubdirs)
+		result.AddAll(knownSubdirs)
 		knownSubdirs.Destroy()
 	}
 	return result
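
The doc comment above makes the caller responsible for destroying the returned set. A hypothetical caller could look like the sketch below; the function itself is invented for illustration, and only knownDirectories, Contains, and Destroy come from the code above.

// Hypothetical caller, for illustration only.
func isKnownDirectory(ctx context.Context, snapshots []*snapshot, dir span.URI) bool {
	dirs := knownDirectories(ctx, snapshots)
	defer dirs.Destroy() // the caller owns the returned set and must release it
	return dirs.Contains(dir)
}
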
30 changes: 13 additions & 17 deletions gopls/internal/lsp/cache/snapshot.go
@@ -130,7 +130,7 @@ type snapshot struct {
 	shouldLoad map[PackageID][]PackagePath
 
 	// unloadableFiles keeps track of files that we've failed to load.
-	unloadableFiles map[span.URI]struct{}
+	unloadableFiles *persistent.Set[span.URI]
 
 	// TODO(rfindley): rename the handles below to "promises". A promise is
 	// different from a handle (we mutate the package handle.)
@@ -152,7 +152,7 @@ type snapshot struct {
 
 	// knownSubdirs is the set of subdirectory URIs in the workspace,
 	// used to create glob patterns for file watching.
-	knownSubdirs knownDirsSet
+	knownSubdirs *persistent.Set[span.URI]
 	knownSubdirsCache map[string]struct{} // memo of knownSubdirs as a set of filenames
 	// unprocessedSubdirChanges are any changes that might affect the set of
 	// subdirectories in the workspace. They are not reflected to knownSubdirs
@@ -269,6 +269,7 @@ func (s *snapshot) destroy(destroyedBy string) {
 	s.modTidyHandles.Destroy()
 	s.modVulnHandles.Destroy()
 	s.modWhyHandles.Destroy()
+	s.unloadableFiles.Destroy()
 }
 
 func (s *snapshot) SequenceID() uint64 {
@@ -750,7 +751,7 @@ func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source
 	}
 
 	// Check if uri is known to be unloadable.
-	_, unloadable := s.unloadableFiles[uri]
+	unloadable := s.unloadableFiles.Contains(uri)
 
 	s.mu.Unlock()
 
@@ -803,7 +804,7 @@ func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source
 	// so if we get here and still have
 	// no IDs, uri is unloadable.
 	if !unloadable && len(ids) == 0 {
-		s.unloadableFiles[uri] = struct{}{}
+		s.unloadableFiles.Add(uri)
 	}
 
 	// Sort packages "narrowest" to "widest" (in practice:
@@ -1017,14 +1018,14 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
 	defer s.mu.Unlock()
 
 	s.knownSubdirs.Destroy()
-	s.knownSubdirs = newKnownDirsSet()
+	s.knownSubdirs = new(persistent.Set[span.URI])
 	s.knownSubdirsCache = nil
 	s.files.Range(func(uri span.URI, fh source.FileHandle) {
 		s.addKnownSubdirLocked(uri, dirs)
 	})
 }
 
-func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet {
+func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) *persistent.Set[span.URI] {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -1075,7 +1076,7 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
 		if s.knownSubdirs.Contains(uri) {
 			break
 		}
-		s.knownSubdirs.Insert(uri)
+		s.knownSubdirs.Add(uri)
 		dir = filepath.Dir(dir)
 		s.knownSubdirsCache = nil
 	}
@@ -1592,7 +1593,7 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error {
 	s.mu.Lock()
 	loadable := files[:0]
 	for _, file := range files {
-		if _, unloadable := s.unloadableFiles[file.URI()]; !unloadable {
+		if !s.unloadableFiles.Contains(file.URI()) {
 			loadable = append(loadable, file)
 		}
 	}
@@ -1655,7 +1656,7 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error {
 		// metadata graph that resulted from loading.
 		uri := file.URI()
 		if len(s.meta.ids[uri]) == 0 {
-			s.unloadableFiles[uri] = struct{}{}
+			s.unloadableFiles.Add(uri)
 		}
 	}
 
@@ -1969,7 +1970,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
 		files: s.files.Clone(),
 		symbolizeHandles: s.symbolizeHandles.Clone(),
 		workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
-		unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
+		unloadableFiles: s.unloadableFiles.Clone(), // see the TODO for unloadableFiles below
 		parseModHandles: s.parseModHandles.Clone(),
 		parseWorkHandles: s.parseWorkHandles.Clone(),
 		modTidyHandles: s.modTidyHandles.Clone(),
@@ -1993,16 +1994,11 @@
 	// incref/decref operation that might destroy it prematurely.)
 	release := result.Acquire()
 
-	// Copy the set of unloadable files.
-	//
-	// TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on
+	// TODO(rfindley): this looks wrong. Should we clear unloadableFiles on
 	// changes to environment or workspace layout, or more generally on any
 	// metadata change?
 	//
 	// Maybe not, as major configuration changes cause a new view.
-	for k, v := range s.unloadableFiles {
-		result.unloadableFiles[k] = v
-	}
 
 	// Add all of the known subdirectories, but don't update them for the
 	// changed files. We need to rebuild the workspace module to know the
@@ -2119,7 +2115,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
 		// TODO(rfindley): this also looks wrong, as typing in an unloadable file
 		// will result in repeated reloads. We should only delete if metadata
 		// changed.
-		delete(result.unloadableFiles, uri)
+		result.unloadableFiles.Remove(uri)
 	}
 
 	// Deleting an import can cause list errors due to import cycles to be
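
Two of the hunks above show the payoff of switching to a persistent structure: snapshot.clone now takes unloadableFiles.Clone() in place of the copy loop that was deleted, and then removes changed URIs from the clone only. That works because the original set and its clone share state yet evolve independently. A minimal sketch of that property, assuming the package cache context and only the methods used in this CL:

// Sketch only: the original set and its clone evolve independently, which is
// what snapshot.clone relies on when it removes changed URIs from the new
// snapshot's unloadableFiles without touching the old snapshot's set.
func cloneIndependence(old *persistent.Set[span.URI], uri span.URI) {
	old.Add(uri)

	next := old.Clone()
	next.Remove(uri)

	_ = old.Contains(uri)  // true: the original still records uri
	_ = next.Contains(uri) // false: only the clone was modified
	next.Destroy()         // old is owned, and eventually destroyed, by its caller
}
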
