Skip to content

Commit

Permalink
fix(repo): load repositories should fail on duplicates (#1937)
Browse files Browse the repository at this point in the history
We've recently had issues because we have multiple `go_repository` rules
with the same name, at different versions.

If we run `go mod tidy` and `gazelle update-repos`, the rule that gets
updated isn't always the one that ends up being used in the build.

To solve this, we are planning to add a check in CI to make sure no
duplicate go_repository rules are loaded.

This change is needed in order to use `gazelle/repo` to enable this
feature. It can also potentially help others migrate to Bzlmod or
simply clean up their repos.
  • Loading branch information
tyler-french authored Oct 15, 2024
1 parent 186dedd commit 3dd8374
Show file tree
Hide file tree
Showing 2 changed files with 26 additions and 21 deletions.
22 changes: 9 additions & 13 deletions cmd/generate_repo_config/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ func TestGenerateRepoConfig(t *testing.T) {
giveWorkspace string
giveReposContent string
wantContent string
wantFail bool
}{
{
name: "no duplicates",
Expand Down Expand Up @@ -101,19 +102,7 @@ def go_repositories():
tag = "1.2",
)
`,
wantContent: `
# Code generated by generate_repo_config.go; DO NOT EDIT.
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com2/pkg/errors",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org2/x/net",
)
`,
wantFail: true,
},
}

Expand All @@ -135,6 +124,13 @@ go_repository(
tmp := t.TempDir()

got, err := generateRepoConfig(filepath.Join(tmp, "WORKSPACE"), filepath.Join(dir, "WORKSPACE"))
if tt.wantFail {
if err == nil {
t.Fatal("wanted error but got nil")
}
return
}

if err != nil {
t.Fatal(err)
}
Expand Down
25 changes: 17 additions & 8 deletions repo/repo.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,16 +83,19 @@ func IsFromDirective(repo *rule.Rule) bool {
// with the following prioritization:
// - rules that were provided as directives have precedence
// - rules that were provided last
func (l *loader) add(file *rule.File, repo *rule.Rule) {
func (l *loader) add(file *rule.File, repo *rule.Rule) error {
name := repo.Name()
if name == "" {
return
return nil
}

if i, ok := l.repoIndexMap[repo.Name()]; ok {
if !IsFromDirective(l.repos[i]) && !IsFromDirective(repo) {
return fmt.Errorf("found duplicate repo %q", repo.Name())
}
if IsFromDirective(l.repos[i]) && !IsFromDirective(repo) {
// We always prefer directives over non-directives
return
return nil
}
// Update existing rule to new rule
l.repos[i] = repo
Expand All @@ -101,6 +104,7 @@ func (l *loader) add(file *rule.File, repo *rule.Rule) {
l.repoIndexMap[name] = len(l.repos) - 1
}
l.repoFileMap[name] = file
return nil
}

// visit returns true exactly once for each file,function key, and false for all future instances
Expand All @@ -113,17 +117,20 @@ func (l *loader) visit(file, function string) bool {
}

// ListRepositories extracts metadata about repositories declared in a
// file.
// file. If there are duplicate rules that are both not defined from a
// directive, it will fail.
func ListRepositories(workspace *rule.File) (repos []*rule.Rule, repoFileMap map[string]*rule.File, err error) {
l := &loader{
repoRoot: filepath.Dir(workspace.Path),
repoIndexMap: make(map[string]int),
repoFileMap: make(map[string]*rule.File),
visited: make(map[macroKey]struct{}),
}

for _, repo := range workspace.Rules {
l.add(workspace, repo)
if err := l.add(workspace, repo); err != nil {
return nil, nil, err
}
}
if err := l.loadExtraRepos(workspace); err != nil {
return nil, nil, err
Expand Down Expand Up @@ -164,7 +171,9 @@ func (l *loader) loadRepositoriesFromMacro(macro *RepoMacro) error {
for _, rule := range macroFile.Rules {
// (Incorrectly) assume that anything with a name attribute is a rule, not a macro to recurse into
if rule.Name() != "" {
l.add(macroFile, rule)
if err := l.add(macroFile, rule); err != nil {
return err
}
continue
}
if !macro.Leveled {
Expand Down

0 comments on commit 3dd8374

Please sign in to comment.