diff --git a/pkg/app/piped/cmd/piped/piped.go b/pkg/app/piped/cmd/piped/piped.go index 2e405dc59c..db939d24c4 100644 --- a/pkg/app/piped/cmd/piped/piped.go +++ b/pkg/app/piped/cmd/piped/piped.go @@ -53,6 +53,7 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/piped/controller/controllermetrics" "github.com/pipe-cd/pipecd/pkg/app/piped/driftdetector" "github.com/pipe-cd/pipecd/pkg/app/piped/eventwatcher" + "github.com/pipe-cd/pipecd/pkg/app/piped/livestatereporter" "github.com/pipe-cd/pipecd/pkg/app/piped/livestatestore" k8slivestatestoremetrics "github.com/pipe-cd/pipecd/pkg/app/piped/livestatestore/kubernetes/kubernetesmetrics" "github.com/pipe-cd/pipecd/pkg/app/piped/notifier" @@ -355,6 +356,14 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { liveStateGetter = s.Getter() } + // Start running application live state reporter. + { + r := livestatereporter.NewReporter(applicationLister, liveStateGetter, apiClient, cfg, input.Logger) + group.Go(func() error { + return r.Run(ctx) + }) + } + decrypter, err := p.initializeSecretDecrypter(cfg) if err != nil { input.Logger.Error("failed to initialize secret decrypter", zap.Error(err)) @@ -366,6 +375,7 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { d, err := driftdetector.NewDetector( applicationLister, gitClient, + liveStateGetter, apiClient, appManifestsCache, cfg, diff --git a/pkg/app/piped/driftdetector/detector.go b/pkg/app/piped/driftdetector/detector.go index 9beb30dbed..a9da259e7d 100644 --- a/pkg/app/piped/driftdetector/detector.go +++ b/pkg/app/piped/driftdetector/detector.go @@ -27,6 +27,10 @@ import ( "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "github.com/pipe-cd/pipecd/pkg/app/piped/driftdetector/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/piped/driftdetector/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/piped/driftdetector/terraform" + "github.com/pipe-cd/pipecd/pkg/app/piped/livestatestore" 
"github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/cache" "github.com/pipe-cd/pipecd/pkg/config" @@ -74,6 +78,7 @@ type providerDetector interface { func NewDetector( appLister applicationLister, gitClient gitClient, + stateGetter livestatestore.Getter, apiClient apiClient, appManifestsCache cache.Cache, cfg *config.PipedSpec, @@ -92,6 +97,60 @@ func NewDetector( for _, cp := range cfg.PlatformProviders { switch cp.Type { + case model.PlatformProviderKubernetes: + sg, ok := stateGetter.KubernetesGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, kubernetes.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + + case model.PlatformProviderCloudRun: + sg, ok := stateGetter.CloudRunGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, cloudrun.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + + case model.PlatformProviderTerraform: + if !*cp.TerraformConfig.DriftDetectionEnabled { + continue + } + sg, ok := stateGetter.TerraformGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, terraform.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + default: } } diff --git a/pkg/app/piped/livestatereporter/reporter.go b/pkg/app/piped/livestatereporter/reporter.go index a1962a6d41..2f2ffc45f6 100644 --- a/pkg/app/piped/livestatereporter/reporter.go +++ b/pkg/app/piped/livestatereporter/reporter.go @@ -25,6 +25,9 @@ import ( "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "github.com/pipe-cd/pipecd/pkg/app/piped/livestatereporter/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/piped/livestatereporter/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/piped/livestatestore" 
"github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/model" @@ -53,7 +56,7 @@ type providerReporter interface { ProviderName() string } -func NewReporter(appLister applicationLister, apiClient apiClient, cfg *config.PipedSpec, logger *zap.Logger) Reporter { +func NewReporter(appLister applicationLister, stateGetter livestatestore.Getter, apiClient apiClient, cfg *config.PipedSpec, logger *zap.Logger) Reporter { r := &reporter{ reporters: make([]providerReporter, 0, len(cfg.PlatformProviders)), logger: logger.Named("live-state-reporter"), @@ -62,6 +65,20 @@ func NewReporter(appLister applicationLister, apiClient apiClient, cfg *config.P const errFmt = "unable to find live state getter for platform provider: %s" for _, cp := range cfg.PlatformProviders { switch cp.Type { + case model.PlatformProviderKubernetes: + sg, ok := stateGetter.KubernetesGetter(cp.Name) + if !ok { + r.logger.Error(fmt.Sprintf(errFmt, cp.Name)) + continue + } + r.reporters = append(r.reporters, kubernetes.NewReporter(cp, appLister, sg, apiClient, logger)) + case model.PlatformProviderCloudRun: + sg, ok := stateGetter.CloudRunGetter(cp.Name) + if !ok { + r.logger.Error(fmt.Sprintf(errFmt, cp.Name)) + continue + } + r.reporters = append(r.reporters, cloudrun.NewReporter(cp, appLister, sg, apiClient, logger)) } } diff --git a/pkg/app/pipedv1/analysisprovider/http/http.go b/pkg/app/pipedv1/analysisprovider/http/http.go new file mode 100644 index 0000000000..e707ac8dec --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/http/http.go @@ -0,0 +1,80 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package http provides a way to analyze with http requests. +// This allows you to do smoke tests, load tests and so on, at your leisure. +package http + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +const ( + ProviderType = "HTTP" + defaultTimeout = 30 * time.Second +) + +type Provider struct { + client *http.Client +} + +func (p *Provider) Type() string { + return ProviderType +} + +func NewProvider(timeout time.Duration) *Provider { + if timeout == 0 { + timeout = defaultTimeout + } + return &Provider{ + client: &http.Client{Timeout: timeout}, + } +} + +// Run sends an HTTP request and then evaluate whether the response is expected one. +func (p *Provider) Run(ctx context.Context, cfg *config.AnalysisHTTP) (bool, string, error) { + req, err := p.makeRequest(ctx, cfg) + if err != nil { + return false, "", err + } + + res, err := p.client.Do(req) + if err != nil { + return false, "", err + } + defer res.Body.Close() + + if res.StatusCode != cfg.ExpectedCode { + return false, "", fmt.Errorf("unexpected status code %d", res.StatusCode) + } + // TODO: Decide how to check if the body is expected one. 
+ return true, "", nil +} + +func (p *Provider) makeRequest(ctx context.Context, cfg *config.AnalysisHTTP) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, cfg.Method, cfg.URL, nil) + if err != nil { + return nil, err + } + req.Header = make(http.Header, len(cfg.Headers)) + for _, h := range cfg.Headers { + req.Header.Set(h.Key, h.Value) + } + return req, nil +} diff --git a/pkg/app/pipedv1/analysisprovider/log/factory/factory.go b/pkg/app/pipedv1/analysisprovider/log/factory/factory.go new file mode 100644 index 0000000000..8d2e6b5a43 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/log/factory/factory.go @@ -0,0 +1,47 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package factory + +import ( + "fmt" + "os" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/log" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/log/stackdriver" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +// NewProvider generates an appropriate provider according to analysis provider config. 
+func NewProvider(providerCfg *config.PipedAnalysisProvider, logger *zap.Logger) (provider log.Provider, err error) { + switch providerCfg.Type { + case model.AnalysisProviderStackdriver: + cfg := providerCfg.StackdriverConfig + sa, err := os.ReadFile(cfg.ServiceAccountFile) + if err != nil { + return nil, err + } + provider, err = stackdriver.NewProvider(sa) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("any of providers config not found") + } + return provider, nil +} diff --git a/pkg/app/pipedv1/analysisprovider/log/provider.go b/pkg/app/pipedv1/analysisprovider/log/provider.go new file mode 100644 index 0000000000..dba70bcb65 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/log/provider.go @@ -0,0 +1,28 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "context" +) + +// Provider represents a client for log provider which provides logs for analysis. +type Provider interface { + Type() string + // Evaluate runs the given query against the log provider, + // and then checks if there is at least one error log. + // Returns the result reason if non-error occurred. 
+ Evaluate(ctx context.Context, query string) (result bool, reason string, err error) +} diff --git a/pkg/app/pipedv1/analysisprovider/log/stackdriver/stackdriver.go b/pkg/app/pipedv1/analysisprovider/log/stackdriver/stackdriver.go new file mode 100644 index 0000000000..cf2da9c0a4 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/log/stackdriver/stackdriver.go @@ -0,0 +1,43 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "time" +) + +const ProviderType = "StackdriverLogging" + +// Provider is a client for stackdriver. +type Provider struct { + serviceAccount []byte + + timeout time.Duration +} + +func NewProvider(serviceAccount []byte) (*Provider, error) { + return &Provider{ + serviceAccount: serviceAccount, + }, nil +} + +func (p *Provider) Type() string { + return ProviderType +} + +func (p *Provider) Evaluate(ctx context.Context, query string) (bool, string, error) { + return false, "", nil +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog.go b/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog.go new file mode 100644 index 0000000000..be6c742476 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog.go @@ -0,0 +1,156 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datadog + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/DataDog/datadog-api-client-go/api/v1/datadog" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" +) + +const ( + ProviderType = "Datadog" + defaultAddress = "datadoghq.com" + defaultTimeout = 30 * time.Second +) + +// Provider works as an HTTP client for datadog. +type Provider struct { + client *datadog.APIClient + runQuery func(request datadog.ApiQueryMetricsRequest) (datadog.MetricsQueryResponse, *http.Response, error) + + address string + apiKey string + applicationKey string + timeout time.Duration + logger *zap.Logger +} + +func NewProvider(apiKey, applicationKey string, opts ...Option) (*Provider, error) { + if apiKey == "" { + return nil, fmt.Errorf("api-key is required") + } + if applicationKey == "" { + return nil, fmt.Errorf("application-key is required") + } + + p := &Provider{ + client: datadog.NewAPIClient(datadog.NewConfiguration()), + runQuery: func(request datadog.ApiQueryMetricsRequest) (datadog.MetricsQueryResponse, *http.Response, error) { + return request.Execute() + }, + address: defaultAddress, + apiKey: apiKey, + applicationKey: applicationKey, + timeout: defaultTimeout, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(p) + } + return p, nil +} + +type Option func(*Provider) + +func WithAddress(address string) Option { + return func(p *Provider) { + p.address = address + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(p *Provider) { + p.logger = 
logger.Named("datadog-provider") + } +} + +func WithTimeout(timeout time.Duration) Option { + return func(p *Provider) { + p.timeout = timeout + } +} + +func (p *Provider) Type() string { + return ProviderType +} + +func (p *Provider) QueryPoints(ctx context.Context, query string, queryRange metrics.QueryRange) ([]metrics.DataPoint, error) { + ctx, cancel := context.WithTimeout(ctx, p.timeout) + defer cancel() + + if err := queryRange.Validate(); err != nil { + return nil, err + } + ctx = context.WithValue( + ctx, + datadog.ContextServerVariables, + map[string]string{"site": p.address}, + ) + ctx = context.WithValue( + ctx, + datadog.ContextAPIKeys, + map[string]datadog.APIKey{ + "apiKeyAuth": { + Key: p.apiKey, + }, + "appKeyAuth": { + Key: p.applicationKey, + }, + }, + ) + + req := p.client.MetricsApi.QueryMetrics(ctx). + From(queryRange.From.Unix()). + To(queryRange.To.Unix()). + Query(query) + resp, httpResp, err := p.runQuery(req) + if err != nil { + return nil, fmt.Errorf("failed to call \"MetricsApi.QueryMetrics\": %w", err) + } + if httpResp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected HTTP status code from %s: %d", httpResp.Request.URL, httpResp.StatusCode) + } + + // Collect data points given by the provider. + var size int + for _, s := range *resp.Series { + size += int(*s.Length) + } + out := make([]metrics.DataPoint, 0, size) + for _, s := range *resp.Series { + points := s.Pointlist + if points == nil || len(*points) == 0 { + return nil, fmt.Errorf("invalid response: no data points found within the queried range: %w", metrics.ErrNoDataFound) + } + for _, point := range *points { + if len(point) < 2 { + return nil, fmt.Errorf("invalid response: invalid data point found") + } + // NOTE: A data point is assumed to be kind of like [unix-time, value]. 
+ out = append(out, metrics.DataPoint{ + Timestamp: int64(point[0]), + Value: point[1], + }) + } + } + return out, nil +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog_test.go b/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog_test.go new file mode 100644 index 0000000000..b4d93e0d29 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/datadog/datadog_test.go @@ -0,0 +1,117 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datadog + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/DataDog/datadog-api-client-go/api/v1/datadog" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" +) + +func TestProviderQueryPoints(t *testing.T) { + t.Parallel() + + toInt64Pointer := func(i int64) *int64 { return &i } + type queryResponse struct { + res datadog.MetricsQueryResponse + httpStatus int + err error + } + testcases := []struct { + name string + queryResponse queryResponse + query string + queryRange metrics.QueryRange + want []metrics.DataPoint + wantErr bool + }{ + { + name: "query failed", + queryResponse: queryResponse{ + res: datadog.MetricsQueryResponse{}, + httpStatus: http.StatusOK, + err: fmt.Errorf("query failed"), + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + wantErr: true, + }, + { + name: "unexpected HTTP status given", + queryResponse: queryResponse{ + res: datadog.MetricsQueryResponse{}, + httpStatus: http.StatusInternalServerError, + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + wantErr: true, + }, + { + name: "multiple data points given", + queryResponse: queryResponse{ + res: datadog.MetricsQueryResponse{ + Series: &[]datadog.MetricsQueryMetadata{ + { + Length: toInt64Pointer(2), + Pointlist: &[][]float64{ + {1600000000, 0.1}, + {1600000001, 0.2}, + }, + }, + }, + }, + httpStatus: http.StatusOK, + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + want: []metrics.DataPoint{ + {Timestamp: 1600000000, Value: 0.1}, + {Timestamp: 1600000001, Value: 
0.2}, + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + provider := Provider{ + client: datadog.NewAPIClient(datadog.NewConfiguration()), + runQuery: func(_ datadog.ApiQueryMetricsRequest) (datadog.MetricsQueryResponse, *http.Response, error) { + return tc.queryResponse.res, &http.Response{StatusCode: tc.queryResponse.httpStatus, Request: &http.Request{}}, tc.queryResponse.err + }, + timeout: defaultTimeout, + logger: zap.NewNop(), + } + got, err := provider.QueryPoints(context.Background(), tc.query, tc.queryRange) + assert.Equal(t, tc.wantErr, err != nil) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/factory/factory.go b/pkg/app/pipedv1/analysisprovider/metrics/factory/factory.go new file mode 100644 index 0000000000..2b4a407d63 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/factory/factory.go @@ -0,0 +1,95 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package factory + +import ( + "encoding/base64" + "fmt" + "os" + "strings" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics/datadog" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics/prometheus" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +// NewProvider generates an appropriate provider according to analysis provider config. +func NewProvider(analysisTempCfg *config.TemplatableAnalysisMetrics, providerCfg *config.PipedAnalysisProvider, logger *zap.Logger) (metrics.Provider, error) { + switch providerCfg.Type { + case model.AnalysisProviderPrometheus: + options := []prometheus.Option{ + prometheus.WithLogger(logger), + prometheus.WithTimeout(analysisTempCfg.Timeout.Duration()), + } + cfg := providerCfg.PrometheusConfig + if cfg.UsernameFile != "" && cfg.PasswordFile != "" { + username, err := os.ReadFile(cfg.UsernameFile) + if err != nil { + return nil, fmt.Errorf("failed to read the username file: %w", err) + } + password, err := os.ReadFile(cfg.PasswordFile) + if err != nil { + return nil, fmt.Errorf("failed to read the password file: %w", err) + } + options = append(options, prometheus.WithBasicAuth(strings.TrimSpace(string(username)), strings.TrimSpace(string(password)))) + } + return prometheus.NewProvider(providerCfg.PrometheusConfig.Address, options...) 
+ case model.AnalysisProviderDatadog: + var apiKey, applicationKey string + cfg := providerCfg.DatadogConfig + if cfg.APIKeyFile != "" { + a, err := os.ReadFile(cfg.APIKeyFile) + if err != nil { + return nil, fmt.Errorf("failed to read the api-key file: %w", err) + } + apiKey = strings.TrimSpace(string(a)) + } + if cfg.ApplicationKeyFile != "" { + a, err := os.ReadFile(cfg.ApplicationKeyFile) + if err != nil { + return nil, fmt.Errorf("failed to read the application-key file: %w", err) + } + applicationKey = strings.TrimSpace(string(a)) + } + if cfg.APIKeyData != "" { + a, err := base64.StdEncoding.DecodeString(cfg.APIKeyData) + if err != nil { + return nil, fmt.Errorf("failed to decode the api-key data: %w", err) + } + apiKey = string(a) + } + if cfg.ApplicationKeyData != "" { + a, err := base64.StdEncoding.DecodeString(cfg.ApplicationKeyData) + if err != nil { + return nil, fmt.Errorf("failed to decode the application-key data: %w", err) + } + applicationKey = string(a) + } + options := []datadog.Option{ + datadog.WithLogger(logger), + datadog.WithTimeout(analysisTempCfg.Timeout.Duration()), + } + if cfg.Address != "" { + options = append(options, datadog.WithAddress(cfg.Address)) + } + return datadog.NewProvider(apiKey, applicationKey, options...) + default: + return nil, fmt.Errorf("any of providers config not found") + } +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/prometheus/fake_client.go b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/fake_client.go new file mode 100644 index 0000000000..f2c81c4a5a --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/fake_client.go @@ -0,0 +1,35 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "context" + + v1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" +) + +type fakeClient struct { + value model.Value + err error + warnings v1.Warnings +} + +func (f fakeClient) QueryRange(_ context.Context, _ string, _ v1.Range) (model.Value, v1.Warnings, error) { + if f.err != nil { + return nil, f.warnings, f.err + } + return f.value, f.warnings, nil +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus.go b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus.go new file mode 100644 index 0000000000..b44473871d --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus.go @@ -0,0 +1,178 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/prometheus/client_golang/api" + v1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" +) + +const ( + ProviderType = "Prometheus" + defaultTimeout = 30 * time.Second +) + +type client interface { + QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) +} + +// Provider is a client for prometheus. +type Provider struct { + api client + username string + password string + + timeout time.Duration + logger *zap.Logger +} + +func NewProvider(address string, opts ...Option) (*Provider, error) { + if address == "" { + return nil, fmt.Errorf("address is required") + } + + p := &Provider{ + timeout: defaultTimeout, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(p) + } + + cfg := api.Config{ + Address: address, + } + if p.username != "" && p.password != "" { + cfg.RoundTripper = config.NewBasicAuthRoundTripper(p.username, config.Secret(p.password), "", api.DefaultRoundTripper) + } + client, err := api.NewClient(cfg) + if err != nil { + return nil, err + } + p.api = v1.NewAPI(client) + return p, nil +} + +type Option func(*Provider) + +func WithTimeout(timeout time.Duration) Option { + return func(p *Provider) { + p.timeout = timeout + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(p *Provider) { + p.logger = logger.Named("prometheus-provider") + } +} + +func WithBasicAuth(username, password string) Option { + return func(p *Provider) { + p.username = username + p.password = password + } +} + +func (p *Provider) Type() string { + return ProviderType +} + +func (p *Provider) QueryPoints(ctx context.Context, query string, queryRange metrics.QueryRange) ([]metrics.DataPoint, error) { + ctx, cancel := context.WithTimeout(ctx, p.timeout) + defer 
cancel() + + if err := queryRange.Validate(); err != nil { + return nil, err + } + // NOTE: Use 1m as a step but make sure the "step" is smaller than the query range. + step := time.Minute + if diff := queryRange.To.Sub(queryRange.From); diff < step { + step = diff + } + + p.logger.Info("run query", zap.String("query", query)) + response, warnings, err := p.api.QueryRange(ctx, query, v1.Range{ + Start: queryRange.From, + End: queryRange.To, + Step: step, + }) + if err != nil { + return nil, fmt.Errorf("failed to run query for %s: %w", ProviderType, err) + } + for _, w := range warnings { + p.logger.Warn("non critical error occurred", zap.String("warning", w)) + } + + // Collect data points given by the provider. + // NOTE: Possibly, it's enough to handle only matrix type as long as calling range queries endpoint. + switch res := response.(type) { + case *model.Scalar: + if math.IsNaN(float64(res.Value)) { + return nil, fmt.Errorf("the value is not a number: %w", metrics.ErrNoDataFound) + } + return []metrics.DataPoint{ + {Timestamp: res.Timestamp.Unix(), Value: float64(res.Value)}, + }, nil + case model.Vector: + points := make([]metrics.DataPoint, 0, len(res)) + for _, s := range res { + if s == nil { + continue + } + if math.IsNaN(float64(s.Value)) { + return nil, fmt.Errorf("the value is not a number: %w", metrics.ErrNoDataFound) + } + points = append(points, metrics.DataPoint{ + Timestamp: s.Timestamp.Unix(), + Value: float64(s.Value), + }) + } + return points, nil + case model.Matrix: + var size int + for _, r := range res { + size += len(r.Values) + } + points := make([]metrics.DataPoint, 0, size) + for _, r := range res { + if len(r.Values) == 0 { + return nil, fmt.Errorf("zero value in range vector type returned: %w", metrics.ErrNoDataFound) + } + for _, point := range r.Values { + if math.IsNaN(float64(point.Value)) { + return nil, fmt.Errorf("the value is not a number: %w", metrics.ErrNoDataFound) + } + points = append(points, metrics.DataPoint{ + 
Timestamp: point.Timestamp.Unix(), + Value: float64(point.Value), + }) + } + } + return points, nil + default: + return nil, fmt.Errorf("unexpected data type returned") + } +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus_test.go b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus_test.go new file mode 100644 index 0000000000..469c974ede --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/prometheus/prometheus_test.go @@ -0,0 +1,139 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" +) + +func TestType(t *testing.T) { + t.Parallel() + + p := Provider{} + assert.Equal(t, ProviderType, p.Type()) +} + +func TestProviderQueryPoints(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + client client + query string + queryRange metrics.QueryRange + want []metrics.DataPoint + wantErr bool + }{ + { + name: "query failed", + client: &fakeClient{ + err: fmt.Errorf("query error"), + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + wantErr: true, + }, + { + name: "scalar data point returned", + client: &fakeClient{ + value: &model.Scalar{Timestamp: model.Time(1600000000), Value: model.SampleValue(0.1)}, + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + want: []metrics.DataPoint{ + {Timestamp: 1600000, Value: 0.1}, + }, + }, + { + name: "vector data points returned", + client: &fakeClient{ + value: model.Vector([]*model.Sample{ + { + Timestamp: model.Time(1600000000), + Value: model.SampleValue(0.1), + }, + { + Timestamp: model.Time(1600001000), + Value: model.SampleValue(0.2), + }, + }), + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + want: []metrics.DataPoint{ + {Timestamp: 1600000, Value: 0.1}, + {Timestamp: 1600001, Value: 0.2}, + }, + }, + { + name: "matrix data points returned", + client: &fakeClient{ + value: model.Matrix([]*model.SampleStream{ + { + Values: []model.SamplePair{ 
+ { + Timestamp: model.Time(1600000000), + Value: model.SampleValue(0.1), + }, + { + Timestamp: model.Time(1600001000), + Value: model.SampleValue(0.2), + }, + }, + }, + }), + }, + query: "foo", + queryRange: metrics.QueryRange{ + From: time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), + To: time.Date(2009, time.January, 1, 0, 5, 0, 0, time.UTC), + }, + want: []metrics.DataPoint{ + {Timestamp: 1600000, Value: 0.1}, + {Timestamp: 1600001, Value: 0.2}, + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + provider := &Provider{ + api: tc.client, + timeout: defaultTimeout, + logger: zap.NewNop(), + } + got, err := provider.QueryPoints(context.Background(), tc.query, tc.queryRange) + assert.Equal(t, tc.wantErr, err != nil) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/analysisprovider/metrics/provider.go b/pkg/app/pipedv1/analysisprovider/metrics/provider.go new file mode 100644 index 0000000000..07f704aab9 --- /dev/null +++ b/pkg/app/pipedv1/analysisprovider/metrics/provider.go @@ -0,0 +1,72 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "context" + "errors" + "fmt" + "time" +) + +const timeFormat = "2006-01-02 15:04:05 MST" + +var ( + ErrNoDataFound = errors.New("no data found") +) + +// Provider represents a client for metrics provider which provides metrics for analysis. 
+type Provider interface { + Type() string + // QueryPoints gives back data points within the given range. + QueryPoints(ctx context.Context, query string, queryRange QueryRange) (points []DataPoint, err error) +} + +type DataPoint struct { + // Unix timestamp in seconds. + Timestamp int64 + Value float64 +} + +func (d *DataPoint) String() string { + // Timestamp is shown in UTC. + return fmt.Sprintf("timestamp: %q, value: %g", time.Unix(d.Timestamp, 0).UTC().Format(timeFormat), d.Value) +} + +// QueryRange represents a sliced time range. +type QueryRange struct { + // Required: Start of the queried time period + From time.Time + // End of the queried time period. Defaults to the current time. + To time.Time +} + +func (q *QueryRange) String() string { + // Timestamps are shown in UTC. + return fmt.Sprintf("from: %q, to: %q", q.From.UTC().Format(timeFormat), q.To.UTC().Format(timeFormat)) +} + +func (q *QueryRange) Validate() error { + if q.From.IsZero() { + return fmt.Errorf("start of the query range is required") + } + if q.To.IsZero() { + q.To = time.Now() + } + if q.From.After(q.To) { + return fmt.Errorf("\"to\" should be after \"from\"") + } + return nil +} diff --git a/pkg/app/pipedv1/chartrepo/chartrepo.go b/pkg/app/pipedv1/chartrepo/chartrepo.go new file mode 100644 index 0000000000..08090b2da9 --- /dev/null +++ b/pkg/app/pipedv1/chartrepo/chartrepo.go @@ -0,0 +1,87 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package chartrepo manages a list of configured helm repositories. +package chartrepo + +import ( + "context" + "fmt" + "os/exec" + + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +var updateGroup = &singleflight.Group{} + +type registry interface { + Helm(ctx context.Context, version string) (string, bool, error) +} + +// Add installs all specified Helm Chart repositories. +// https://helm.sh/docs/topics/chart_repository/ +// helm repo add fantastic-charts https://fantastic-charts.storage.googleapis.com +// helm repo add fantastic-charts https://fantastic-charts.storage.googleapis.com --username my-username --password my-password +func Add(ctx context.Context, repos []config.HelmChartRepository, reg registry, logger *zap.Logger) error { + helm, _, err := reg.Helm(ctx, "") + if err != nil { + return fmt.Errorf("failed to find helm to add repos (%w)", err) + } + + for _, repo := range repos { + args := []string{"repo", "add", repo.Name, repo.Address} + if repo.Insecure { + args = append(args, "--insecure-skip-tls-verify") + } + if repo.Username != "" || repo.Password != "" { + args = append(args, "--username", repo.Username, "--password", repo.Password) + } + cmd := exec.CommandContext(ctx, helm, args...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to add chart repository %s: %s (%w)", repo.Name, string(out), err) + } + logger.Info(fmt.Sprintf("successfully added chart repository: %s", repo.Name)) + } + return nil +} + +func Update(ctx context.Context, reg registry, logger *zap.Logger) error { + _, err, _ := updateGroup.Do("update", func() (interface{}, error) { + return nil, update(ctx, reg, logger) + }) + return err +} + +func update(ctx context.Context, reg registry, logger *zap.Logger) error { + logger.Info("start updating Helm chart repositories") + + helm, _, err := reg.Helm(ctx, "") + if err != nil { + return fmt.Errorf("failed to find helm to update repos (%w)", err) + } + + args := []string{"repo", "update"} + cmd := exec.CommandContext(ctx, helm, args...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to update Helm chart repositories: %s (%w)", string(out), err) + } + + logger.Info("successfully updated Helm chart repositories") + return nil +} diff --git a/pkg/app/pipedv1/cmd/piped/piped.go b/pkg/app/pipedv1/cmd/piped/piped.go index 7e88db432e..0517539d85 100644 --- a/pkg/app/pipedv1/cmd/piped/piped.go +++ b/pkg/app/pipedv1/cmd/piped/piped.go @@ -42,21 +42,26 @@ import ( "sigs.k8s.io/yaml" "github.com/pipe-cd/pipecd/pkg/admin" - "github.com/pipe-cd/pipecd/pkg/app/piped/driftdetector" - "github.com/pipe-cd/pipecd/pkg/app/piped/livestatereporter" - "github.com/pipe-cd/pipecd/pkg/app/piped/planpreview" - "github.com/pipe-cd/pipecd/pkg/app/piped/planpreview/planpreviewmetrics" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/analysisresultstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/applicationstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/commandstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/deploymentstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/eventstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/appconfigreporter" + 
"github.com/pipe-cd/pipecd/pkg/app/pipedv1/chartrepo" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller/controllermetrics" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/driftdetector" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/eventwatcher" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatereporter" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore" + k8slivestatestoremetrics "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/kubernetes/kubernetesmetrics" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/notifier" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planpreview" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planpreview/planpreviewmetrics" + k8scloudprovidermetrics "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetesmetrics" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/statsreporter" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/trigger" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/cache/memorycache" @@ -68,6 +73,11 @@ import ( "github.com/pipe-cd/pipecd/pkg/rpc/rpcauth" "github.com/pipe-cd/pipecd/pkg/rpc/rpcclient" "github.com/pipe-cd/pipecd/pkg/version" + + // Import to preload all built-in executors to the default registry. + _ "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/registry" + // Import to preload all planners to the default registry. + _ "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/registry" ) const ( @@ -158,6 +168,52 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { input.Logger.Info("successfully configured ssh-config") } + // Initialize default tool registry. + if err := toolregistry.InitDefaultRegistry(p.toolsDir, input.Logger); err != nil { + input.Logger.Error("failed to initialize default tool registry", zap.Error(err)) + return err + } + + // Add configured Helm chart repositories. 
+ if repos := cfg.HTTPHelmChartRepositories(); len(repos) > 0 { + reg := toolregistry.DefaultRegistry() + if err := chartrepo.Add(ctx, repos, reg, input.Logger); err != nil { + input.Logger.Error("failed to add configured chart repositories", zap.Error(err)) + return err + } + if err := chartrepo.Update(ctx, reg, input.Logger); err != nil { + input.Logger.Error("failed to update Helm chart repositories", zap.Error(err)) + return err + } + } + + // Login to chart registries. + if regs := cfg.ChartRegistries; len(regs) > 0 { + reg := toolregistry.DefaultRegistry() + helm, _, err := reg.Helm(ctx, "") + if err != nil { + return fmt.Errorf("failed to find helm while login to chart registries (%w)", err) + } + + for _, r := range regs { + switch r.Type { + case config.OCIHelmChartRegistry: + if r.Username == "" || r.Password == "" { + continue + } + + if err := loginToOCIRegistry(ctx, helm, r.Address, r.Username, r.Password); err != nil { + input.Logger.Error(fmt.Sprintf("failed to login to %s Helm chart registry", r.Address), zap.Error(err)) + return err + } + input.Logger.Info("successfully logged in to Helm chart registry", zap.String("address", r.Address)) + + default: + return fmt.Errorf("unsupported Helm chart registry type: %s", r.Type) + } + } + } + pipedKey, err := cfg.LoadPipedKey() if err != nil { input.Logger.Error("failed to load piped key", zap.Error(err)) @@ -225,6 +281,13 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { git.WithEmail(cfg.Git.Email), git.WithLogger(input.Logger), } + for _, repo := range cfg.GitHelmChartRepositories() { + if f := repo.SSHKeyFile; f != "" { + // Configure git client to use the specified SSH key while fetching private Helm charts. + env := fmt.Sprintf("GIT_SSH_COMMAND=ssh -i %s -o StrictHostKeyChecking=no -F /dev/null", f) + gitOptions = append(gitOptions, git.WithGitEnvForRepo(repo.GitRemote, env)) + } + } gitClient, err := git.NewClient(gitOptions...) 
if err != nil { input.Logger.Error("failed to initialize git client", zap.Error(err)) @@ -283,9 +346,19 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { // Create memory caches. appManifestsCache := memorycache.NewTTLCache(ctx, time.Hour, time.Minute) + var liveStateGetter livestatestore.Getter + // Start running application live state store. + { + s := livestatestore.NewStore(ctx, cfg, applicationLister, p.gracePeriod, input.Logger) + group.Go(func() error { + return s.Run(ctx) + }) + liveStateGetter = s.Getter() + } + // Start running application live state reporter. { - r := livestatereporter.NewReporter(applicationLister, apiClient, cfg, input.Logger) + r := livestatereporter.NewReporter(applicationLister, liveStateGetter, apiClient, cfg, input.Logger) group.Go(func() error { return r.Run(ctx) }) @@ -302,6 +375,7 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { d, err := driftdetector.NewDetector( applicationLister, gitClient, + liveStateGetter, apiClient, appManifestsCache, cfg, @@ -332,6 +406,7 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { deploymentLister, commandLister, applicationLister, + livestatestore.LiveResourceLister{Getter: liveStateGetter}, analysisResultStore, notifier, decrypter, @@ -823,6 +898,8 @@ func registerMetrics(pipedID, projectID, launcherVersion string) *prometheus.Reg wrapped.Register(collectors.NewGoCollector()) wrapped.Register(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + k8scloudprovidermetrics.Register(wrapped) + k8slivestatestoremetrics.Register(wrapped) planpreviewmetrics.Register(wrapped) controllermetrics.Register(wrapped) diff --git a/pkg/app/pipedv1/controller/controller.go b/pkg/app/pipedv1/controller/controller.go index 8799e109f4..630965c541 100644 --- a/pkg/app/pipedv1/controller/controller.go +++ b/pkg/app/pipedv1/controller/controller.go @@ -34,6 +34,7 @@ import ( 
"github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller/controllermetrics" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/logpersister" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/cache" "github.com/pipe-cd/pipecd/pkg/config" @@ -77,6 +78,10 @@ type applicationLister interface { Get(id string) (*model.Application, bool) } +type liveResourceLister interface { + ListKubernetesAppLiveResources(platformProvider, appID string) ([]provider.Manifest, bool) +} + type analysisResultStore interface { GetLatestAnalysisResult(ctx context.Context, applicationID string) (*model.AnalysisResult, error) PutLatestAnalysisResult(ctx context.Context, applicationID string, analysisResult *model.AnalysisResult) error @@ -106,6 +111,7 @@ type controller struct { deploymentLister deploymentLister commandLister commandLister applicationLister applicationLister + liveResourceLister liveResourceLister analysisResultStore analysisResultStore notifier notifier pipedConfig []byte @@ -147,6 +153,7 @@ func NewController( deploymentLister deploymentLister, commandLister commandLister, applicationLister applicationLister, + liveResourceLister liveResourceLister, analysisResultStore analysisResultStore, notifier notifier, sd secretDecrypter, @@ -168,6 +175,7 @@ func NewController( deploymentLister: deploymentLister, commandLister: commandLister, applicationLister: applicationLister, + liveResourceLister: liveResourceLister, analysisResultStore: analysisResultStore, notifier: notifier, secretDecrypter: sd, @@ -617,6 +625,7 @@ func (c *controller) startNewScheduler(ctx context.Context, d *model.Deployment) c.gitClient, c.commandLister, c.applicationLister, + c.liveResourceLister, c.analysisResultStore, c.logPersister, c.notifier, @@ -717,10 +726,15 @@ func (c *controller) cancelDeployment(ctx context.Context, d *model.Deployment, } type appLiveResourceLister struct { + lister 
liveResourceLister platformProvider string appID string } +func (l appLiveResourceLister) ListKubernetesResources() ([]provider.Manifest, bool) { + return l.lister.ListKubernetesAppLiveResources(l.platformProvider, l.appID) +} + func reportApplicationDeployingStatus(ctx context.Context, c apiClient, appID string, deploying bool) error { var ( err error diff --git a/pkg/app/pipedv1/controller/scheduler.go b/pkg/app/pipedv1/controller/scheduler.go index be24194019..0697f30a4f 100644 --- a/pkg/app/pipedv1/controller/scheduler.go +++ b/pkg/app/pipedv1/controller/scheduler.go @@ -28,9 +28,10 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller/controllermetrics" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" - registry "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/registry" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/registry" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/logpersister" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" + pln "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/cache" "github.com/pipe-cd/pipecd/pkg/config" @@ -47,6 +48,7 @@ type scheduler struct { gitClient gitClient commandLister commandLister applicationLister applicationLister + liveResourceLister liveResourceLister analysisResultStore analysisResultStore logPersister logpersister.Persister metadataStore metadatastore.MetadataStore @@ -83,6 +85,7 @@ func newScheduler( gitClient gitClient, commandLister commandLister, applicationLister applicationLister, + liveResourceLister liveResourceLister, analysisResultStore analysisResultStore, lp logpersister.Persister, notifier notifier, @@ -107,6 +110,7 @@ func newScheduler( gitClient: gitClient, commandLister: commandLister, applicationLister: applicationLister, + liveResourceLister: liveResourceLister, analysisResultStore: analysisResultStore, 
logPersister: lp, metadataStore: metadatastore.NewMetadataStore(apiClient, d), @@ -471,7 +475,7 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage var stageConfig config.PipelineStage var stageConfigFound bool if ps.Predefined { - // FIXME: stageConfig, stageConfigFound = pln.GetPredefinedStage(ps.Id) + stageConfig, stageConfigFound = pln.GetPredefinedStage(ps.Id) } else { stageConfig, stageConfigFound = s.genericApplicationConfig.GetStage(ps.Index) } @@ -496,6 +500,11 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage deploymentID: s.deployment.Id, stageID: ps.Id, } + alrLister := appLiveResourceLister{ + lister: s.liveResourceLister, + platformProvider: app.PlatformProvider, + appID: app.Id, + } aStore := appAnalysisResultStore{ store: s.analysisResultStore, applicationID: app.Id, @@ -513,6 +522,7 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage LogPersister: lp, MetadataStore: s.metadataStore, AppManifestsCache: s.appManifestsCache, + AppLiveResourceLister: alrLister, AnalysisResultStore: aStore, Logger: s.logger, Notifier: s.notifier, diff --git a/pkg/app/pipedv1/driftdetector/cloudrun/detector.go b/pkg/app/pipedv1/driftdetector/cloudrun/detector.go new file mode 100644 index 0000000000..a2dfe474e1 --- /dev/null +++ b/pkg/app/pipedv1/driftdetector/cloudrun/detector.go @@ -0,0 +1,343 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/cloudrun" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/sourceprocesser" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/diff" + "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type gitClient interface { + Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) +} + +type secretDecrypter interface { + Decrypt(string) (string, error) +} + +type reporter interface { + ReportApplicationSyncState(ctx context.Context, appID string, state model.ApplicationSyncState) error +} + +type Detector interface { + Run(ctx context.Context) error + ProviderName() string +} + +type detector struct { + provider config.PipedPlatformProvider + appLister applicationLister + gitClient gitClient + stateGetter cloudrun.Getter + reporter reporter + appManifestsCache cache.Cache + interval time.Duration + config *config.PipedSpec + secretDecrypter secretDecrypter + logger *zap.Logger + + gitRepos map[string]git.Repo +} + +func NewDetector( + cp config.PipedPlatformProvider, + appLister applicationLister, + gitClient gitClient, + stateGetter cloudrun.Getter, + reporter reporter, + appManifestsCache cache.Cache, + cfg *config.PipedSpec, + sd secretDecrypter, + logger *zap.Logger, +) Detector { + + logger = logger.Named("cloudrun-detector").With( + zap.String("platform-provider", cp.Name), + ) + return &detector{ + provider: cp, + appLister: appLister, + gitClient: gitClient, + stateGetter: stateGetter, 
+ reporter: reporter, + appManifestsCache: appManifestsCache, + interval: time.Minute, + config: cfg, + secretDecrypter: sd, + gitRepos: make(map[string]git.Repo), + logger: logger, + } +} + +func (d *detector) Run(ctx context.Context) error { + d.logger.Info("start running drift detector for cloudrun applications") + + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + d.logger.Info("drift detector for cloudrun applications has been stopped") + return nil + + case <-ticker.C: + d.check(ctx) + } + } +} + +func (d *detector) ProviderName() string { + return d.provider.Name +} + +func (d *detector) check(ctx context.Context) { + appsByRepo := d.listGroupedApplication() + + for repoID, apps := range appsByRepo { + gitRepo, ok := d.gitRepos[repoID] + if !ok { + // Clone repository for the first time. + gr, err := d.cloneGitRepository(ctx, repoID) + if err != nil { + d.logger.Error("failed to clone git repository", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + gitRepo = gr + d.gitRepos[repoID] = gitRepo + } + + // Fetch the latest commit to compare the states. + branch := gitRepo.GetClonedBranch() + if err := gitRepo.Pull(ctx, branch); err != nil { + d.logger.Error("failed to pull repository branch", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + + // Get the head commit of the repository. + headCommit, err := gitRepo.GetLatestCommit(ctx) + if err != nil { + d.logger.Error("failed to get head commit hash", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + + // Start checking all applications in this repository. 
+ for _, app := range apps { + if err := d.checkApplication(ctx, app, gitRepo, headCommit); err != nil { + d.logger.Error(fmt.Sprintf("failed to check application: %s", app.Id), zap.Error(err)) + } + } + } +} + +func (d *detector) cloneGitRepository(ctx context.Context, repoID string) (git.Repo, error) { + repoCfg, ok := d.config.GetRepository(repoID) + if !ok { + return nil, fmt.Errorf("repository %s was not found in piped configuration", repoID) + } + return d.gitClient.Clone(ctx, repoID, repoCfg.Remote, repoCfg.Branch, "") +} + +// listGroupedApplication retrieves all applications those should be handled by this director +// and then groups them by repoID. +func (d *detector) listGroupedApplication() map[string][]*model.Application { + var ( + apps = d.appLister.ListByPlatformProvider(d.provider.Name) + m = make(map[string][]*model.Application) + ) + for _, app := range apps { + repoID := app.GitPath.Repo.Id + m[repoID] = append(m[repoID], app) + } + return m +} + +func (d *detector) checkApplication(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit) error { + headManifest, err := d.loadHeadServiceManifest(app, repo, headCommit) + if err != nil { + return err + } + d.logger.Info(fmt.Sprintf("application %s has a service manifest at commit %s", app.Id, headCommit.Hash)) + + liveManifest, ok := d.stateGetter.GetServiceManifest(app.Id) + if !ok { + return fmt.Errorf("failed to get live service manifest") + } + d.logger.Info(fmt.Sprintf("application %s has a live service manifest", app.Id)) + + result, err := provider.Diff( + liveManifest, + headManifest, + diff.WithEquateEmpty(), + diff.WithIgnoreAddingMapKeys(), + diff.WithCompareNumberAndNumericString(), + ) + if err != nil { + return err + } + + state := makeSyncState(result, headCommit.Hash) + + return d.reporter.ReportApplicationSyncState(ctx, app.Id, state) +} + +func (d *detector) loadHeadServiceManifest(app *model.Application, repo git.Repo, headCommit git.Commit) 
(provider.ServiceManifest, error) { + var ( + manifestCache = provider.ServiceManifestCache{ + AppID: app.Id, + Cache: d.appManifestsCache, + Logger: d.logger, + } + repoDir = repo.GetPath() + appDir = filepath.Join(repoDir, app.GitPath.Path) + ) + + manifest, ok := manifestCache.Get(headCommit.Hash) + if !ok { + // When the manifests were not in the cache we have to load them. + cfg, err := d.loadApplicationConfiguration(repoDir, app) + if err != nil { + return provider.ServiceManifest{}, fmt.Errorf("failed to load application configuration: %w", err) + } + + gds, ok := cfg.GetGenericApplication() + if !ok { + return provider.ServiceManifest{}, fmt.Errorf("unsupport application kind %s", cfg.Kind) + } + + var ( + encryptionUsed = d.secretDecrypter != nil && gds.Encryption != nil + attachmentUsed = gds.Attachment != nil + ) + + // We have to copy repository into another directory because + // decrypting the sealed secrets or attaching files might change the git repository. + if attachmentUsed || encryptionUsed { + dir, err := os.MkdirTemp("", "detector-git-processing") + if err != nil { + return provider.ServiceManifest{}, fmt.Errorf("failed to prepare a temporary directory for git repository (%w)", err) + } + defer os.RemoveAll(dir) + + repo, err = repo.Copy(filepath.Join(dir, "repo")) + if err != nil { + return provider.ServiceManifest{}, fmt.Errorf("failed to copy the cloned git repository (%w)", err) + } + repoDir := repo.GetPath() + appDir = filepath.Join(repoDir, app.GitPath.Path) + } + + var templProcessors []sourceprocesser.SourceTemplateProcessor + // Decrypting secrets to manifests. + if encryptionUsed { + templProcessors = append(templProcessors, sourceprocesser.NewSecretDecrypterProcessor(gds.Encryption, d.secretDecrypter)) + } + // Then attaching configurated files to manifests. 
+ if attachmentUsed { + templProcessors = append(templProcessors, sourceprocesser.NewAttachmentProcessor(gds.Attachment)) + } + if len(templProcessors) > 0 { + sp := sourceprocesser.NewSourceProcessor(appDir, templProcessors...) + if err := sp.Process(); err != nil { + return provider.ServiceManifest{}, fmt.Errorf("failed to process source files: %w", err) + } + } + + var manifestFile string + if cfg.CloudRunApplicationSpec != nil { + manifestFile = cfg.CloudRunApplicationSpec.Input.ServiceManifestFile + } + + manifest, err = provider.LoadServiceManifest(appDir, manifestFile) + if err != nil { + return provider.ServiceManifest{}, fmt.Errorf("failed to load new service manifest: %w", err) + } + manifestCache.Put(headCommit.Hash, manifest) + } + return manifest, nil +} + +func (d *detector) loadApplicationConfiguration(repoPath string, app *model.Application) (*config.Config, error) { + path := filepath.Join(repoPath, app.GitPath.GetApplicationConfigFilePath()) + cfg, err := config.LoadFromYAML(path) + if err != nil { + return nil, err + } + if appKind, ok := cfg.Kind.ToApplicationKind(); !ok || appKind != app.Kind { + return nil, fmt.Errorf("application in application configuration file is not match, got: %s, expected: %s", appKind, app.Kind) + } + return cfg, nil +} + +func makeSyncState(r *provider.DiffResult, commit string) model.ApplicationSyncState { + if r.NoChange() { + return model.ApplicationSyncState{ + Status: model.ApplicationSyncStatus_SYNCED, + Timestamp: time.Now().Unix(), + } + } + + shortReason := "The service manifest doesn't be synced" + if len(commit) >= 7 { + commit = commit[:7] + } + + var b strings.Builder + b.WriteString(fmt.Sprintf("Diff between the defined state in Git at commit %s and actual live state:\n\n", commit)) + b.WriteString("--- Actual (LiveState)\n+++ Expected (Git)\n\n") + + details := r.Render(provider.DiffRenderOptions{ + // Currently, we do not use the diff command to render the result + // because CloudRun adds a large 
number of default values to the + // running manifest that causes a wrong diff text. + UseDiffCommand: false, + }) + b.WriteString(details) + + return model.ApplicationSyncState{ + Status: model.ApplicationSyncStatus_OUT_OF_SYNC, + ShortReason: shortReason, + Reason: b.String(), + Timestamp: time.Now().Unix(), + } +} diff --git a/pkg/app/pipedv1/driftdetector/detector.go b/pkg/app/pipedv1/driftdetector/detector.go new file mode 100644 index 0000000000..779d0cb6ca --- /dev/null +++ b/pkg/app/pipedv1/driftdetector/detector.go @@ -0,0 +1,213 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package driftdetector provides a piped component +// that continuously checks configuration drift between the current live state +// and the state defined at the latest commit of all applications. 
+package driftdetector + +import ( + "context" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/driftdetector/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/driftdetector/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/driftdetector/terraform" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type deploymentLister interface { + ListAppHeadDeployments() map[string]*model.Deployment +} + +type gitClient interface { + Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) +} + +type apiClient interface { + ReportApplicationSyncState(ctx context.Context, req *pipedservice.ReportApplicationSyncStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationSyncStateResponse, error) +} + +type secretDecrypter interface { + Decrypt(string) (string, error) +} + +type Detector interface { + Run(ctx context.Context) error +} + +type detector struct { + apiClient apiClient + detectors []providerDetector + syncStates map[string]model.ApplicationSyncState + mu sync.RWMutex + logger *zap.Logger +} + +type providerDetector interface { + Run(ctx context.Context) error + ProviderName() string +} + +func NewDetector( + appLister applicationLister, + gitClient gitClient, + stateGetter livestatestore.Getter, + apiClient apiClient, + appManifestsCache cache.Cache, + cfg *config.PipedSpec, + sd secretDecrypter, + logger *zap.Logger, +) (Detector, error) { + + d := &detector{ + apiClient: apiClient, + detectors: make([]providerDetector, 0, len(cfg.PlatformProviders)), + syncStates: 
make(map[string]model.ApplicationSyncState), + logger: logger.Named("drift-detector"), + } + + const format = "unable to find live state getter for platform provider: %s" + + for _, cp := range cfg.PlatformProviders { + switch cp.Type { + case model.PlatformProviderKubernetes: + sg, ok := stateGetter.KubernetesGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, kubernetes.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + + case model.PlatformProviderCloudRun: + sg, ok := stateGetter.CloudRunGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, cloudrun.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + + case model.PlatformProviderTerraform: + if !*cp.TerraformConfig.DriftDetectionEnabled { + continue + } + sg, ok := stateGetter.TerraformGetter(cp.Name) + if !ok { + return nil, fmt.Errorf(format, cp.Name) + } + d.detectors = append(d.detectors, terraform.NewDetector( + cp, + appLister, + gitClient, + sg, + d, + appManifestsCache, + cfg, + sd, + logger, + )) + + default: + } + } + + return d, nil +} + +func (d *detector) Run(ctx context.Context) error { + group, ctx := errgroup.WithContext(ctx) + + for i, detector := range d.detectors { + detector := detector + // Avoid starting all detectors at the same time to reduce the API call burst. 
+ time.Sleep(time.Duration(i) * 10 * time.Second) + d.logger.Info(fmt.Sprintf("starting drift detector for cloud provider: %s", detector.ProviderName())) + + group.Go(func() error { + return detector.Run(ctx) + }) + } + + d.logger.Info(fmt.Sprintf("all drift detectors of %d providers have been started", len(d.detectors))) + + if err := group.Wait(); err != nil { + d.logger.Error("failed while running", zap.Error(err)) + return err + } + + d.logger.Info(fmt.Sprintf("all drift detectors of %d providers have been stopped", len(d.detectors))) + return nil +} + +func (d *detector) ReportApplicationSyncState(ctx context.Context, appID string, state model.ApplicationSyncState) error { + d.mu.RLock() + curState, ok := d.syncStates[appID] + d.mu.RUnlock() + + if ok && !curState.HasChanged(state) { + return nil + } + + _, err := d.apiClient.ReportApplicationSyncState(ctx, &pipedservice.ReportApplicationSyncStateRequest{ + ApplicationId: appID, + State: &state, + }) + if err != nil { + d.logger.Error("failed to report application sync state", + zap.String("application-id", appID), + zap.Any("state", state), + zap.Error(err), + ) + return err + } + + d.mu.Lock() + d.syncStates[appID] = state + d.mu.Unlock() + + return nil +} diff --git a/pkg/app/pipedv1/driftdetector/kubernetes/detector.go b/pkg/app/pipedv1/driftdetector/kubernetes/detector.go new file mode 100644 index 0000000000..133d59f366 --- /dev/null +++ b/pkg/app/pipedv1/driftdetector/kubernetes/detector.go @@ -0,0 +1,416 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/kubernetes" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/sourceprocesser" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/diff" + "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type gitClient interface { + Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) +} + +type secretDecrypter interface { + Decrypt(string) (string, error) +} + +type reporter interface { + ReportApplicationSyncState(ctx context.Context, appID string, state model.ApplicationSyncState) error +} + +type Detector interface { + Run(ctx context.Context) error + ProviderName() string +} + +type detector struct { + provider config.PipedPlatformProvider + appLister applicationLister + gitClient gitClient + stateGetter kubernetes.Getter + reporter reporter + appManifestsCache cache.Cache + interval time.Duration + config *config.PipedSpec + secretDecrypter secretDecrypter + logger *zap.Logger + + gitRepos map[string]git.Repo + syncStates map[string]model.ApplicationSyncState +} + +func NewDetector( + cp config.PipedPlatformProvider, + appLister applicationLister, + gitClient gitClient, + stateGetter kubernetes.Getter, + reporter reporter, + appManifestsCache cache.Cache, + cfg *config.PipedSpec, + sd secretDecrypter, + logger *zap.Logger, +) Detector { + + logger = logger.Named("kubernetes-detector").With( + zap.String("platform-provider", cp.Name), + ) + return &detector{ + provider: cp, + appLister: 
appLister, + gitClient: gitClient, + stateGetter: stateGetter, + reporter: reporter, + appManifestsCache: appManifestsCache, + interval: time.Minute, + config: cfg, + secretDecrypter: sd, + gitRepos: make(map[string]git.Repo), + syncStates: make(map[string]model.ApplicationSyncState), + logger: logger, + } +} + +func (d *detector) Run(ctx context.Context) error { + d.logger.Info("start running drift detector for kubernetes applications") + + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + d.check(ctx) + + case <-ctx.Done(): + d.logger.Info("drift detector for kubernetes applications has been stopped") + return nil + } + } +} + +func (d *detector) check(ctx context.Context) { + appsByRepo := d.listGroupedApplication() + + for repoID, apps := range appsByRepo { + gitRepo, ok := d.gitRepos[repoID] + if !ok { + // Clone repository for the first time. + repoCfg, ok := d.config.GetRepository(repoID) + if !ok { + d.logger.Error(fmt.Sprintf("repository %s was not found in piped configuration", repoID)) + continue + } + gr, err := d.gitClient.Clone(ctx, repoID, repoCfg.Remote, repoCfg.Branch, "") + if err != nil { + d.logger.Error("failed to clone repository", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + gitRepo = gr + d.gitRepos[repoID] = gitRepo + } + + // Fetch the latest commit to compare the states. + branch := gitRepo.GetClonedBranch() + if err := gitRepo.Pull(ctx, branch); err != nil { + d.logger.Error("failed to update repository branch", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + + // Get the head commit of the repository. + headCommit, err := gitRepo.GetLatestCommit(ctx) + if err != nil { + d.logger.Error("failed to get head commit hash", + zap.String("repo-id", repoID), + zap.Error(err), + ) + continue + } + + // Start checking all applications in this repository. 
+ for _, app := range apps { + if err := d.checkApplication(ctx, app, gitRepo, headCommit); err != nil { + d.logger.Error(fmt.Sprintf("failed to check application: %s", app.Id), zap.Error(err)) + } + } + } +} + +func (d *detector) checkApplication(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit) error { + watchingResourceKinds := d.stateGetter.GetWatchingResourceKinds() + headManifests, err := d.loadHeadManifests(ctx, app, repo, headCommit, watchingResourceKinds) + if err != nil { + return err + } + headManifests = filterIgnoringManifests(headManifests) + d.logger.Debug(fmt.Sprintf("application %s has %d manifests at commit %s", app.Id, len(headManifests), headCommit.Hash)) + + liveManifests := d.stateGetter.GetAppLiveManifests(app.Id) + liveManifests = filterIgnoringManifests(liveManifests) + d.logger.Debug(fmt.Sprintf("application %s has %d live manifests", app.Id, len(liveManifests))) + + ddCfg, err := d.getDriftDetectionConfig(repo.GetPath(), app) + if err != nil { + return err + } + + ignoreConfig := make(map[string][]string, 0) + if ddCfg != nil { + for _, ignoreField := range ddCfg.IgnoreFields { + // ignoreField is 'apiVersion:kind:namespace:name#fieldPath' + splited := strings.Split(ignoreField, "#") + key, ignoredPath := splited[0], splited[1] + ignoreConfig[key] = append(ignoreConfig[key], ignoredPath) + } + } + + result, err := provider.DiffList( + liveManifests, + headManifests, + d.logger, + diff.WithEquateEmpty(), + diff.WithIgnoreAddingMapKeys(), + diff.WithCompareNumberAndNumericString(), + diff.WithIgnoreConfig(ignoreConfig), + ) + if err != nil { + return err + } + + state := makeSyncState(result, headCommit.Hash) + + return d.reporter.ReportApplicationSyncState(ctx, app.Id, state) +} + +func (d *detector) loadHeadManifests(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit, watchingResourceKinds []provider.APIVersionKind) ([]provider.Manifest, error) { + var ( + manifestCache = 
provider.AppManifestsCache{ + AppID: app.Id, + Cache: d.appManifestsCache, + Logger: d.logger, + } + repoDir = repo.GetPath() + appDir = filepath.Join(repoDir, app.GitPath.Path) + ) + + manifests, ok := manifestCache.Get(headCommit.Hash) + if !ok { + // When the manifests were not in the cache we have to load them. + cfg, err := d.loadApplicationConfiguration(repoDir, app) + if err != nil { + return nil, fmt.Errorf("failed to load application configuration: %w", err) + } + + gds, ok := cfg.GetGenericApplication() + if !ok { + return nil, fmt.Errorf("unsupport application kind %s", cfg.Kind) + } + + var ( + encryptionUsed = d.secretDecrypter != nil && gds.Encryption != nil + attachmentUsed = gds.Attachment != nil + ) + + // We have to copy repository into another directory because + // decrypting the sealed secrets or attaching files might change the git repository. + if attachmentUsed || encryptionUsed { + dir, err := os.MkdirTemp("", "detector-git-processing") + if err != nil { + return nil, fmt.Errorf("failed to prepare a temporary directory for git repository (%w)", err) + } + defer os.RemoveAll(dir) + + repo, err = repo.Copy(filepath.Join(dir, "repo")) + if err != nil { + return nil, fmt.Errorf("failed to copy the cloned git repository (%w)", err) + } + repoDir = repo.GetPath() + appDir = filepath.Join(repoDir, app.GitPath.Path) + } + + var templProcessors []sourceprocesser.SourceTemplateProcessor + // Decrypting secrets to manifests. + if encryptionUsed { + templProcessors = append(templProcessors, sourceprocesser.NewSecretDecrypterProcessor(gds.Encryption, d.secretDecrypter)) + } + // Then attaching configurated files to manifests. + if attachmentUsed { + templProcessors = append(templProcessors, sourceprocesser.NewAttachmentProcessor(gds.Attachment)) + } + if len(templProcessors) > 0 { + sp := sourceprocesser.NewSourceProcessor(appDir, templProcessors...) 
+ if err := sp.Process(); err != nil { + return nil, fmt.Errorf("failed to process source files: %w", err) + } + } + + loader := provider.NewLoader(app.Name, appDir, repoDir, app.GitPath.ConfigFilename, cfg.KubernetesApplicationSpec.Input, d.gitClient, d.logger) + manifests, err = loader.LoadManifests(ctx) + if err != nil { + err = fmt.Errorf("failed to load new manifests: %w", err) + return nil, err + } + manifestCache.Put(headCommit.Hash, manifests) + } + + watchingMap := make(map[provider.APIVersionKind]struct{}, len(watchingResourceKinds)) + for _, k := range watchingResourceKinds { + watchingMap[k] = struct{}{} + } + + filtered := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + _, ok := watchingMap[provider.APIVersionKind{ + APIVersion: m.Key.APIVersion, + Kind: m.Key.Kind, + }] + if ok { + filtered = append(filtered, m) + } + } + + return filtered, nil +} + +// listGroupedApplication retrieves all applications those should be handled by this director +// and then groups them by repoID. 
+func (d *detector) listGroupedApplication() map[string][]*model.Application { + var ( + apps = d.appLister.ListByPlatformProvider(d.provider.Name) + m = make(map[string][]*model.Application) + ) + for _, app := range apps { + repoID := app.GitPath.Repo.Id + if _, ok := m[repoID]; !ok { + m[repoID] = []*model.Application{app} + } else { + m[repoID] = append(m[repoID], app) + } + } + return m +} + +func (d *detector) loadApplicationConfiguration(repoPath string, app *model.Application) (*config.Config, error) { + path := filepath.Join(repoPath, app.GitPath.GetApplicationConfigFilePath()) + cfg, err := config.LoadFromYAML(path) + if err != nil { + return nil, err + } + if appKind, ok := cfg.Kind.ToApplicationKind(); !ok || appKind != app.Kind { + return nil, fmt.Errorf("application in application configuration file is not match, got: %s, expected: %s", appKind, app.Kind) + } + + if cfg.KubernetesApplicationSpec != nil && cfg.KubernetesApplicationSpec.Input.HelmChart != nil { + chartRepoName := cfg.KubernetesApplicationSpec.Input.HelmChart.Repository + if chartRepoName != "" { + cfg.KubernetesApplicationSpec.Input.HelmChart.Insecure = d.config.IsInsecureChartRepository(chartRepoName) + } + } + + return cfg, nil +} + +func (d *detector) ProviderName() string { + return d.provider.Name +} + +func filterIgnoringManifests(manifests []provider.Manifest) []provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + annotations := m.GetAnnotations() + if annotations[provider.LabelIgnoreDriftDirection] == provider.IgnoreDriftDetectionTrue { + continue + } + out = append(out, m) + } + return out +} + +func makeSyncState(r *provider.DiffListResult, commit string) model.ApplicationSyncState { + if r.NoChange() { + return model.ApplicationSyncState{ + Status: model.ApplicationSyncStatus_SYNCED, + ShortReason: "", + Reason: "", + Timestamp: time.Now().Unix(), + } + } + + total := len(r.Adds) + len(r.Deletes) + len(r.Changes) + 
shortReason := fmt.Sprintf("There are %d manifests not synced (%d adds, %d deletes, %d changes)", total, len(r.Adds), len(r.Deletes), len(r.Changes)) + if len(commit) >= 7 { + commit = commit[:7] + } + + var b strings.Builder + b.WriteString(fmt.Sprintf("Diff between the defined state in Git at commit %s and actual state in cluster:\n\n", commit)) + b.WriteString("--- Actual (LiveState)\n+++ Expected (Git)\n\n") + + details := r.Render(provider.DiffRenderOptions{ + MaskSecret: true, + MaskConfigMap: true, + MaxChangedManifests: 3, + // Currently, we do not use the diff command to render the result + // because Kubernetes adds a large number of default values to the + // running manifest that causes a wrong diff text. + UseDiffCommand: false, + }) + b.WriteString(details) + + return model.ApplicationSyncState{ + Status: model.ApplicationSyncStatus_OUT_OF_SYNC, + ShortReason: shortReason, + Reason: b.String(), + Timestamp: time.Now().Unix(), + } +} + +func (d *detector) getDriftDetectionConfig(repoDir string, app *model.Application) (*config.DriftDetection, error) { + cfg, err := d.loadApplicationConfiguration(repoDir, app) + if err != nil { + return nil, fmt.Errorf("failed to load application configuration: %w", err) + } + gds, ok := cfg.GetGenericApplication() + if !ok { + return nil, fmt.Errorf("unsupport application kind %s", cfg.Kind) + } + + return gds.DriftDetection, nil +} diff --git a/pkg/app/pipedv1/driftdetector/lambda/detector.go b/pkg/app/pipedv1/driftdetector/lambda/detector.go new file mode 100644 index 0000000000..45aa0a7a38 --- /dev/null +++ b/pkg/app/pipedv1/driftdetector/lambda/detector.go @@ -0,0 +1,15 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda diff --git a/pkg/app/pipedv1/driftdetector/terraform/detector.go b/pkg/app/pipedv1/driftdetector/terraform/detector.go new file mode 100644 index 0000000000..6f30237787 --- /dev/null +++ b/pkg/app/pipedv1/driftdetector/terraform/detector.go @@ -0,0 +1,355 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package terraform

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"go.uber.org/zap"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/terraform"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/sourceprocesser"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry"
	"github.com/pipe-cd/pipecd/pkg/cache"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/git"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// applicationLister lists the applications configured for a given platform provider.
type applicationLister interface {
	ListByPlatformProvider(name string) []*model.Application
}

// gitClient clones a remote git repository into the given destination.
type gitClient interface {
	Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}

// secretDecrypter decrypts an encrypted secret string.
type secretDecrypter interface {
	Decrypt(string) (string, error)
}

// reporter receives the sync state computed for an application.
type reporter interface {
	ReportApplicationSyncState(ctx context.Context, appID string, state model.ApplicationSyncState) error
}

// Detector is a drift detector for applications on one Terraform platform provider.
type Detector interface {
	Run(ctx context.Context) error
	ProviderName() string
}

type detector struct {
	provider config.PipedPlatformProvider
	appLister applicationLister
	gitClient gitClient
	// NOTE(review): stateGetter, appManifestsCache and syncStates are stored
	// but not referenced in this file — confirm they are needed.
	stateGetter terraform.Getter
	reporter reporter
	appManifestsCache cache.Cache
	// interval between drift checks; 10 minutes here (vs 1 minute for
	// Kubernetes) since each check runs a full `terraform plan`.
	interval time.Duration
	config *config.PipedSpec
	secretDecrypter secretDecrypter
	logger *zap.Logger

	// gitRepos caches one long-lived clone per repository ID.
	gitRepos map[string]git.Repo
	syncStates map[string]model.ApplicationSyncState
}

// NewDetector creates a drift detector for the given Terraform platform provider.
func NewDetector(
	cp config.PipedPlatformProvider,
	appLister applicationLister,
	gitClient gitClient,
	stateGetter terraform.Getter,
	reporter reporter,
	appManifestsCache cache.Cache,
	cfg *config.PipedSpec,
	sd secretDecrypter,
	logger *zap.Logger,
) Detector {

	logger = logger.Named("terraform-detector").With(
		zap.String("platform-provider", cp.Name),
	)
	return &detector{
		provider: cp,
		appLister: appLister,
		gitClient: gitClient,
		stateGetter: stateGetter,
		reporter: reporter,
		appManifestsCache: appManifestsCache,
		interval: 10 * time.Minute,
		config: cfg,
		secretDecrypter: sd,
		gitRepos: make(map[string]git.Repo),
		syncStates: make(map[string]model.ApplicationSyncState),
		logger: logger,
	}
}

// Run checks all handled applications every interval until ctx is canceled.
func (d *detector) Run(ctx context.Context) error {
	d.logger.Info("start running drift detector for terraform applications")

	ticker := time.NewTicker(d.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			d.check(ctx)

		case <-ctx.Done():
			d.logger.Info("drift detector for terraform applications has been stopped")
			return nil
		}
	}
}

// check refreshes every watched repository and verifies each of its applications.
// Errors are logged per repository/application; one failure does not stop the rest.
func (d *detector) check(ctx context.Context) {
	appsByRepo := d.listGroupedApplication()

	for repoID, apps := range appsByRepo {
		gitRepo, ok := d.gitRepos[repoID]
		if !ok {
			// Clone repository for the first time.
			gr, err := d.cloneGitRepository(ctx, repoID)
			if err != nil {
				d.logger.Error("failed to clone git repository",
					zap.String("repo-id", repoID),
					zap.Error(err),
				)
				continue
			}
			gitRepo = gr
			d.gitRepos[repoID] = gitRepo
		}

		// Fetch the latest commit to compare the states.
		branch := gitRepo.GetClonedBranch()
		if err := gitRepo.Pull(ctx, branch); err != nil {
			d.logger.Error("failed to pull repository branch",
				zap.String("repo-id", repoID),
				zap.Error(err),
			)
			continue
		}

		// Get the head commit of the repository.
		headCommit, err := gitRepo.GetLatestCommit(ctx)
		if err != nil {
			d.logger.Error("failed to get head commit hash",
				zap.String("repo-id", repoID),
				zap.Error(err),
			)
			continue
		}

		// Start checking all applications in this repository.
		for _, app := range apps {
			if err := d.checkApplication(ctx, app, gitRepo, headCommit); err != nil {
				d.logger.Error(fmt.Sprintf("failed to check application: %s", app.Id), zap.Error(err))
			}
		}
	}
}

// checkApplication runs `terraform plan` against the sources at the head
// commit and reports the resulting sync state for one application.
// NOTE(review): messages written to buf are never logged or surfaced on the
// error paths below — confirm whether buf should be emitted on failure.
func (d *detector) checkApplication(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit) error {
	var (
		repoDir = repo.GetPath()
		appDir = filepath.Join(repoDir, app.GitPath.Path)
	)

	// Load config
	cpCfg := d.provider.TerraformConfig
	cfg, err := loadApplicationConfiguration(repoDir, app)
	if err != nil {
		return fmt.Errorf("failed to load application configuration: %w", err)
	}
	appCfg := cfg.TerraformApplicationSpec

	gds, ok := cfg.GetGenericApplication()
	if !ok {
		return fmt.Errorf("unsupport application kind %s", cfg.Kind)
	}

	var (
		encryptionUsed = d.secretDecrypter != nil && gds.Encryption != nil
		attachmentUsed = gds.Attachment != nil
	)

	// We have to copy repository into another directory because
	// decrypting the sealed secrets or attaching files might change the git repository.
	if attachmentUsed || encryptionUsed {
		dir, err := os.MkdirTemp("", "detector-git-processing")
		if err != nil {
			return fmt.Errorf("failed to prepare a temporary directory for git repository (%w)", err)
		}
		defer os.RemoveAll(dir)

		repo, err = repo.Copy(filepath.Join(dir, "repo"))
		if err != nil {
			return fmt.Errorf("failed to copy the cloned git repository (%w)", err)
		}
		repoDir = repo.GetPath()
		appDir = filepath.Join(repoDir, app.GitPath.Path)
	}

	var templProcessors []sourceprocesser.SourceTemplateProcessor
	// Decrypting secrets to manifests.
	if encryptionUsed {
		templProcessors = append(templProcessors, sourceprocesser.NewSecretDecrypterProcessor(gds.Encryption, d.secretDecrypter))
	}
	// Then attaching configurated files to manifests.
	if attachmentUsed {
		templProcessors = append(templProcessors, sourceprocesser.NewAttachmentProcessor(gds.Attachment))
	}
	if len(templProcessors) > 0 {
		sp := sourceprocesser.NewSourceProcessor(appDir, templProcessors...)
		if err := sp.Process(); err != nil {
			return fmt.Errorf("failed to process source files: %w", err)
		}
	}

	// Set up terraform
	version := appCfg.Input.TerraformVersion
	terraformPath, _, err := toolregistry.DefaultRegistry().Terraform(ctx, version)
	if err != nil {
		return err
	}

	// Provider-level vars come first so application-level vars can override them.
	vars := make([]string, 0, len(cpCfg.Vars)+len(appCfg.Input.Vars))
	vars = append(vars, cpCfg.Vars...)
	vars = append(vars, appCfg.Input.Vars...)
	flags := appCfg.Input.CommandFlags
	envs := appCfg.Input.CommandEnvs

	executor := provider.NewTerraform(
		terraformPath,
		appDir,
		provider.WithoutColor(),
		provider.WithVars(vars),
		provider.WithVarFiles(appCfg.Input.VarFiles),
		provider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply),
		provider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply),
	)

	buf := new(bytes.Buffer)
	if err := executor.Init(ctx, buf); err != nil {
		fmt.Fprintf(buf, "failed while executing terraform init (%v)\n", err)
		return err
	}

	if ws := appCfg.Input.Workspace; ws != "" {
		if err := executor.SelectWorkspace(ctx, ws); err != nil {
			fmt.Fprintf(buf, "failed to select workspace %q (%v). You might need to create the workspace before using by command %q\n",
				ws,
				err,
				"terraform workspace new "+ws,
			)
			return err
		}
		fmt.Fprintf(buf, "selected workspace %q\n", ws)
	}

	result, err := executor.Plan(ctx, buf)
	if err != nil {
		fmt.Fprintf(buf, "failed while executing terraform plan (%v)\n", err)
		return err
	}

	state, err := makeSyncState(result, headCommit.Hash)
	if err != nil {
		fmt.Fprintf(buf, "failed while calculate terraform sync state (%v)\n", err)
		return err
	}

	return d.reporter.ReportApplicationSyncState(ctx, app.Id, *state)
}

// makeSyncState converts a terraform plan result into the sync state that is
// reported to the control plane, rendering a human-readable summary.
func makeSyncState(r provider.PlanResult, commit string) (*model.ApplicationSyncState, error) {
	if r.NoChanges() {
		return &model.ApplicationSyncState{
			Status: model.ApplicationSyncStatus_SYNCED,
			ShortReason: "",
			Reason: "",
			Timestamp: time.Now().Unix(),
		}, nil
	}

	total := r.Imports + r.Adds + r.Destroys + r.Changes
	shortReason := fmt.Sprintf("There are %d manifests not synced (%d imports, %d adds, %d deletes, %d changes)", total, r.Imports, r.Adds, r.Destroys, r.Changes)
	// Use the abbreviated commit hash in the rendered message.
	if len(commit) >= 7 {
		commit = commit[:7]
	}

	var b strings.Builder
	b.WriteString(fmt.Sprintf("Diff between the defined state in Git at commit %s and actual live state:\n\n", commit))
	b.WriteString("--- Actual   (LiveState)\n+++ Expected (Git)\n\n")

	details, err := r.Render()
	if err != nil {
		return nil, err
	}
	b.WriteString(details)

	return &model.ApplicationSyncState{
		Status: model.ApplicationSyncStatus_OUT_OF_SYNC,
		ShortReason: shortReason,
		Reason: b.String(),
		Timestamp: time.Now().Unix(),
	}, nil
}

// cloneGitRepository clones the repository registered under repoID in the
// piped configuration.
func (d *detector) cloneGitRepository(ctx context.Context, repoID string) (git.Repo, error) {
	repoCfg, ok := d.config.GetRepository(repoID)
	if !ok {
		return nil, fmt.Errorf("repository %s was not found in piped configuration", repoID)
	}
	return d.gitClient.Clone(ctx, repoID, repoCfg.Remote, repoCfg.Branch, "")
}

// listGroupedApplication retrieves all
applications those should be handled by this director +// and then groups them by repoID. +func (d *detector) listGroupedApplication() map[string][]*model.Application { + var ( + apps = d.appLister.ListByPlatformProvider(d.provider.Name) + m = make(map[string][]*model.Application) + ) + for _, app := range apps { + repoID := app.GitPath.Repo.Id + m[repoID] = append(m[repoID], app) + } + return m +} + +func loadApplicationConfiguration(repoPath string, app *model.Application) (*config.Config, error) { + path := filepath.Join(repoPath, app.GitPath.GetApplicationConfigFilePath()) + cfg, err := config.LoadFromYAML(path) + if err != nil { + return nil, err + } + if appKind, ok := cfg.Kind.ToApplicationKind(); !ok || appKind != app.Kind { + return nil, fmt.Errorf("application in application configuration file is not match, got: %s, expected: %s", appKind, app.Kind) + } + return cfg, nil +} + +func (d *detector) ProviderName() string { + return d.provider.Name +} diff --git a/pkg/app/pipedv1/executor/analysis/analysis.go b/pkg/app/pipedv1/executor/analysis/analysis.go new file mode 100644 index 0000000000..b39834f65f --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/analysis.go @@ -0,0 +1,366 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package analysis

import (
	"context"
	"errors"
	"fmt"
	"time"

	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	httpprovider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/http"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/log"
	logfactory "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/log/factory"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics"
	metricsfactory "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics/factory"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

const (
	// skippedByKey is the metadata key used when the stage was skipped.
	skippedByKey = "SkippedBy"
)

// Executor runs the ANALYSIS stage: it spawns analyzers for the configured
// metrics, log and http queries and fails the stage if any of them fails.
type Executor struct {
	executor.Input

	// repoDir is the root of the prepared deploy source.
	repoDir string
	// config is the application configuration loaded from the deploy source.
	config *config.Config
	// startTime is when this Execute invocation began.
	startTime time.Time
	// previousElapsedTime is how long the stage had already run before a restart.
	previousElapsedTime time.Duration
}

// registerer accepts stage executor factories.
type registerer interface {
	Register(stage model.Stage, f executor.Factory) error
}

// Register registers this executor factory for the ANALYSIS stage.
func Register(r registerer) {
	f := func(in executor.Input) executor.Executor {
		return &Executor{
			Input: in,
		}
	}
	r.Register(model.StageAnalysis, f)
}

// Execute spawns and runs multiple analyzers, each running its query at a regular
// interval. If any of them fails, the stage ends with failure.
func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus {
	e.startTime = time.Now()
	ctx := sig.Context()
	options := e.StageConfig.AnalysisStageOptions
	if options == nil {
		e.Logger.Error("missing analysis configuration for ANALYSIS stage")
		return model.StageStatus_STAGE_FAILURE
	}

	// Prepare the deploy source so queries can be templated from it.
	ds, err := e.TargetDSP.Get(ctx, e.LogPersister)
	if err != nil {
		e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}
	e.repoDir = ds.RepoDir
	e.config = ds.ApplicationConfig

	// A missing AnalysisTemplate file is not an error; fall back to an empty spec.
	templateCfg, err := config.LoadAnalysisTemplate(e.repoDir)
	if errors.Is(err, config.ErrNotFound) {
		e.Logger.Info("config file for AnalysisTemplate not found")
		templateCfg = &config.AnalysisTemplateSpec{}
	} else if err != nil {
		e.LogPersister.Error(err.Error())
		return model.StageStatus_STAGE_FAILURE
	}

	// Subtract any time already spent before a restart so the total stage
	// duration stays bounded by options.Duration.
	timeout := time.Duration(options.Duration)
	e.previousElapsedTime = e.retrievePreviousElapsedTime()
	if e.previousElapsedTime > 0 {
		// Restart from the middle.
		timeout -= e.previousElapsedTime
	}
	defer e.saveElapsedTime(ctx)

	ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	eg, ctxWithTimeout := errgroup.WithContext(ctxWithTimeout)

	// Sync the skip command.
	// NOTE(review): status is written by this goroutine while Execute also
	// writes/reads it after eg.Wait(); this looks like a data race — confirm
	// and guard with a mutex/atomic if so.
	var (
		status = model.StageStatus_STAGE_SUCCESS
		doneCh = make(chan struct{})
	)
	defer close(doneCh)
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if !e.checkSkipped(ctx) {
					continue
				}
				status = model.StageStatus_STAGE_SKIPPED
				// Stop the context to cancel all running analyses.
				cancel()
				return
			case <-doneCh:
				return
			}
		}
	}()

	// Run analyses with metrics providers.
	for i := range options.Metrics {
		cfg, err := e.getMetricsConfig(options.Metrics[i], templateCfg)
		if err != nil {
			e.LogPersister.Errorf("Failed to get metrics config: %v", err)
			return model.StageStatus_STAGE_FAILURE
		}
		provider, err := e.newMetricsProvider(cfg.Provider, options.Metrics[i])
		if err != nil {
			e.LogPersister.Errorf("Failed to generate metrics provider: %v", err)
			return model.StageStatus_STAGE_FAILURE
		}

		id := fmt.Sprintf("metrics-%d", i)
		args := e.buildAppArgs(options.Metrics[i].Template.AppArgs)
		analyzer := newMetricsAnalyzer(id, *cfg, e.startTime, provider, e.AnalysisResultStore, args, e.Logger, e.LogPersister)

		eg.Go(func() error {
			e.LogPersister.Infof("[%s] Start metrics analyzer every %s with query template: %q", analyzer.id, cfg.Interval.Duration(), cfg.Query)
			return analyzer.run(ctxWithTimeout)
		})
	}
	// Run analyses with logging providers.
	for i := range options.Logs {
		analyzer, err := e.newAnalyzerForLog(i, &options.Logs[i], templateCfg)
		if err != nil {
			e.LogPersister.Errorf("Failed to spawn analyzer for %s: %v", options.Logs[i].Provider, err)
			return model.StageStatus_STAGE_FAILURE
		}
		eg.Go(func() error {
			e.LogPersister.Infof("[%s] Start log analyzer", analyzer.id)
			return analyzer.run(ctxWithTimeout)
		})
	}
	// Run analyses with http providers.
	for i := range options.HTTPS {
		analyzer, err := e.newAnalyzerForHTTP(i, &options.HTTPS[i], templateCfg)
		if err != nil {
			e.LogPersister.Errorf("Failed to spawn analyzer for HTTP: %v", err)
			return model.StageStatus_STAGE_FAILURE
		}
		eg.Go(func() error {
			e.LogPersister.Infof("[%s] Start http analyzer", analyzer.id)
			return analyzer.run(ctxWithTimeout)
		})
	}

	if err := eg.Wait(); err != nil {
		e.LogPersister.Errorf("Analysis failed: %s", err.Error())
		return model.StageStatus_STAGE_FAILURE
	}

	status = executor.DetermineStageStatus(sig.Signal(), e.Stage.Status, status)
	if status != model.StageStatus_STAGE_SUCCESS {
		return status
	}

	e.LogPersister.Success("All analyses were successful")
	// Record the start time so later stages can reference the last analysis run.
	err = e.AnalysisResultStore.PutLatestAnalysisResult(ctx, &model.AnalysisResult{
		StartTime: e.startTime.Unix(),
	})
	if err != nil {
		e.Logger.Error("failed to send the analysis result", zap.Error(err))
	}
	return status
}

// elapsedTimeKey is the stage metadata key holding the accumulated run time.
const elapsedTimeKey = "elapsedTime"

// saveElapsedTime stores the elapsed time of analysis stage into metadata persister.
// The analysis stage can be restarted from the middle even if it ends unexpectedly,
// that's why count should be stored.
func (e *Executor) saveElapsedTime(ctx context.Context) {
	elapsedTime := time.Since(e.startTime) + e.previousElapsedTime
	metadata := map[string]string{
		elapsedTimeKey: elapsedTime.String(),
	}
	if err := e.MetadataStore.Stage(e.Stage.Id).PutMulti(ctx, metadata); err != nil {
		e.Logger.Error("failed to store metadata", zap.Error(err))
	}
}

// retrievePreviousElapsedTime sets the elapsed time of analysis stage by decoding metadata.
+func (e *Executor) retrievePreviousElapsedTime() time.Duration { + s, ok := e.MetadataStore.Stage(e.Stage.Id).Get(elapsedTimeKey) + if !ok { + return 0 + } + et, err := time.ParseDuration(s) + if err != nil { + e.Logger.Error("unexpected elapsed time is stored", zap.String("stored-value", s), zap.Error(err)) + return 0 + } + return et +} + +func (e *Executor) newAnalyzerForLog(i int, templatable *config.TemplatableAnalysisLog, templateCfg *config.AnalysisTemplateSpec) (*analyzer, error) { + cfg, err := e.getLogConfig(templatable, templateCfg) + if err != nil { + return nil, err + } + provider, err := e.newLogProvider(cfg.Provider) + if err != nil { + return nil, err + } + id := fmt.Sprintf("log-%d", i) + runner := func(ctx context.Context, query string) (bool, string, error) { + return provider.Evaluate(ctx, query) + } + return newAnalyzer(id, provider.Type(), cfg.Query, runner, time.Duration(cfg.Interval), cfg.FailureLimit, cfg.SkipOnNoData, e.Logger, e.LogPersister), nil +} + +func (e *Executor) newAnalyzerForHTTP(i int, templatable *config.TemplatableAnalysisHTTP, templateCfg *config.AnalysisTemplateSpec) (*analyzer, error) { + cfg, err := e.getHTTPConfig(templatable, templateCfg) + if err != nil { + return nil, err + } + provider := httpprovider.NewProvider(time.Duration(cfg.Timeout)) + id := fmt.Sprintf("http-%d", i) + runner := func(ctx context.Context, query string) (bool, string, error) { + return provider.Run(ctx, cfg) + } + return newAnalyzer(id, provider.Type(), "", runner, time.Duration(cfg.Interval), cfg.FailureLimit, cfg.SkipOnNoData, e.Logger, e.LogPersister), nil +} + +func (e *Executor) newMetricsProvider(providerName string, templatable config.TemplatableAnalysisMetrics) (metrics.Provider, error) { + cfg, ok := e.PipedConfig.GetAnalysisProvider(providerName) + if !ok { + return nil, fmt.Errorf("unknown provider name %s", providerName) + } + provider, err := metricsfactory.NewProvider(&templatable, &cfg, e.Logger) + if err != nil { + return nil, 
err + } + return provider, nil +} + +func (e *Executor) newLogProvider(providerName string) (log.Provider, error) { + cfg, ok := e.PipedConfig.GetAnalysisProvider(providerName) + if !ok { + return nil, fmt.Errorf("unknown provider name %s", providerName) + } + provider, err := logfactory.NewProvider(&cfg, e.Logger) + if err != nil { + return nil, err + } + return provider, nil +} + +// getMetricsConfig renders the given template and returns the metrics config. +// Just returns metrics config if no template specified. +func (e *Executor) getMetricsConfig(templatableCfg config.TemplatableAnalysisMetrics, templateCfg *config.AnalysisTemplateSpec) (*config.AnalysisMetrics, error) { + name := templatableCfg.Template.Name + if name == "" { + return &templatableCfg.AnalysisMetrics, nil + } + + cfg, ok := templateCfg.Metrics[name] + if !ok { + return nil, fmt.Errorf("analysis template %s not found despite template specified", name) + } + return &cfg, nil +} + +// getLogConfig renders the given template and returns the log config. +// Just returns log config if no template specified. +func (e *Executor) getLogConfig(templatableCfg *config.TemplatableAnalysisLog, templateCfg *config.AnalysisTemplateSpec) (*config.AnalysisLog, error) { + name := templatableCfg.Template.Name + if name == "" { + return &templatableCfg.AnalysisLog, nil + } + + cfg, ok := templateCfg.Logs[name] + if !ok { + return nil, fmt.Errorf("analysis template %s not found despite template specified", name) + } + return &cfg, nil +} + +// getHTTPConfig renders the given template and returns the http config. +// Just returns http config if no template specified. 
+func (e *Executor) getHTTPConfig(templatableCfg *config.TemplatableAnalysisHTTP, templateCfg *config.AnalysisTemplateSpec) (*config.AnalysisHTTP, error) { + name := templatableCfg.Template.Name + if name == "" { + return &templatableCfg.AnalysisHTTP, nil + } + + cfg, ok := templateCfg.HTTPS[name] + if !ok { + return nil, fmt.Errorf("analysis template %s not found despite template specified", name) + } + return &cfg, nil +} + +func (e *Executor) buildAppArgs(customArgs map[string]string) argsTemplate { + args := argsTemplate{ + App: appArgs{ + Name: e.Application.Name, + // TODO: Populate Env + Env: "", + }, + AppCustomArgs: customArgs, + } + if e.config.Kind != config.KindKubernetesApp { + return args + } + namespace := "default" + if n := e.config.KubernetesApplicationSpec.Input.Namespace; n != "" { + namespace = n + } + args.K8s.Namespace = namespace + return args +} + +func (e *Executor) checkSkipped(ctx context.Context) bool { + var skipCmd *model.ReportableCommand + commands := e.CommandLister.ListCommands() + + for i, cmd := range commands { + if cmd.GetSkipStage() != nil { + skipCmd = &commands[i] + break + } + } + if skipCmd == nil { + return false + } + + if err := e.MetadataStore.Stage(e.Stage.Id).Put(ctx, skippedByKey, skipCmd.Commander); err != nil { + e.LogPersister.Errorf("Unable to save the commander who skipped the stage information to deployment, %v", err) + } + e.LogPersister.Infof("Got the skip command from %q", skipCmd.Commander) + e.LogPersister.Infof("This stage has been skipped by user (%s)", skipCmd.Commander) + + if err := skipCmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, nil); err != nil { + e.Logger.Error("failed to report handled command", zap.Error(err)) + } + return true +} diff --git a/pkg/app/pipedv1/executor/analysis/analyzer.go b/pkg/app/pipedv1/executor/analysis/analyzer.go new file mode 100644 index 0000000000..c1e7954adf --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/analyzer.go @@ -0,0 +1,110 @@ +// 
Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "context" + "errors" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" +) + +// analyzer contains a query for an analysis provider. +type analyzer struct { + id string + providerType string + evaluate evaluator + query string + interval time.Duration + // The analysis will fail, if this value is exceeded, + failureLimit int + skipOnNoData bool + + logger *zap.Logger + logPersister executor.LogPersister +} + +type evaluator func(ctx context.Context, query string) (expected bool, reason string, err error) + +func newAnalyzer( + id string, + providerType string, + query string, + evaluate evaluator, + interval time.Duration, + failureLimit int, + skipOnNodata bool, + logger *zap.Logger, + logPersister executor.LogPersister, +) *analyzer { + return &analyzer{ + id: id, + providerType: providerType, + evaluate: evaluate, + query: query, + interval: interval, + failureLimit: failureLimit, + skipOnNoData: skipOnNodata, + logPersister: logPersister, + logger: logger.With( + zap.String("analyzer-id", id), + zap.String("provider-type", providerType), + ), + } +} + +// run starts an analysis which runs the query at the given interval, until the context is done. 
+// It returns an error when the number of failures exceeds the failureLimit.
+func (a *analyzer) run(ctx context.Context) error {
+	ticker := time.NewTicker(a.interval)
+	defer ticker.Stop()
+
+	failureCount := 0
+	for {
+		select {
+		case <-ticker.C:
+			expected, reason, err := a.evaluate(ctx, a.query)
+			// Ignore parent's context deadline exceeded error, and return immediately.
+			if errors.Is(err, context.DeadlineExceeded) && ctx.Err() == context.DeadlineExceeded {
+				return nil
+			}
+			if errors.Is(err, metrics.ErrNoDataFound) && a.skipOnNoData {
+				a.logPersister.Infof("[%s] The query result evaluation was skipped because \"skipOnNoData\" is true even though no data returned. Reason: %v. Performed query: %q", a.id, err, a.query)
+				continue
+			}
+			if err != nil {
+				reason = fmt.Sprintf("failed to run query: %s", err.Error())
+			}
+
+			if expected {
+				a.logPersister.Successf("[%s] The query result is expected one. Reason: %s. Performed query: %q", a.id, reason, a.query)
+				continue
+			}
+
+			a.logPersister.Errorf("[%s] The query result is unexpected. Reason: %s. Performed query: %q", a.id, reason, a.query)
+			failureCount++
+			if failureCount > a.failureLimit {
+				return fmt.Errorf("analysis '%s' failed because the failure number exceeded the failure limit (%d)", a.id, a.failureLimit)
+			}
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
diff --git a/pkg/app/pipedv1/executor/analysis/mannwhitney/alg.go b/pkg/app/pipedv1/executor/analysis/mannwhitney/alg.go
new file mode 100644
index 0000000000..bab7649abf
--- /dev/null
+++ b/pkg/app/pipedv1/executor/analysis/mannwhitney/alg.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mannwhitney + +// Miscellaneous helper algorithms + +import ( + "fmt" +) + +func maxint(a, b int) int { + if a > b { + return a + } + return b +} + +func minint(a, b int) int { + if a < b { + return a + } + return b +} + +func sumint(xs []int) int { + sum := 0 + for _, x := range xs { + sum += x + } + return sum +} + +// bisect returns an x in [low, high] such that |f(x)| <= tolerance +// using the bisection method. +// +// f(low) and f(high) must have opposite signs. +// +// If f does not have a root in this interval (e.g., it is +// discontiguous), this returns the X of the apparent discontinuity +// and false. 
+func bisect(f func(float64) float64, low, high, tolerance float64) (float64, bool) { + flow, fhigh := f(low), f(high) + if -tolerance <= flow && flow <= tolerance { + return low, true + } + if -tolerance <= fhigh && fhigh <= tolerance { + return high, true + } + if mathSign(flow) == mathSign(fhigh) { + panic(fmt.Sprintf("root of f is not bracketed by [low, high]; f(%g)=%g f(%g)=%g", low, flow, high, fhigh)) + } + for { + mid := (high + low) / 2 + fmid := f(mid) + if -tolerance <= fmid && fmid <= tolerance { + return mid, true + } + if mid == high || mid == low { + return mid, false + } + if mathSign(fmid) == mathSign(flow) { + low = mid + flow = fmid + } else { + high = mid + fhigh = fmid + } + } +} + +// bisectBool implements the bisection method on a boolean function. +// It returns x1, x2 ∈ [low, high], x1 < x2 such that f(x1) != f(x2) +// and x2 - x1 <= xtol. +// +// If f(low) == f(high), it panics. +func bisectBool(f func(float64) bool, low, high, xtol float64) (x1, x2 float64) { + flow, fhigh := f(low), f(high) + if flow == fhigh { + panic(fmt.Sprintf("root of f is not bracketed by [low, high]; f(%g)=%v f(%g)=%v", low, flow, high, fhigh)) + } + for { + if high-low <= xtol { + return low, high + } + mid := (high + low) / 2 + if mid == high || mid == low { + return low, high + } + fmid := f(mid) + if fmid == flow { + low = mid + flow = fmid + } else { + high = mid + fhigh = fmid + } + } +} + +// series returns the sum of the series f(0), f(1), ... +// +// This implementation is fast, but subject to round-off error. +func series(f func(float64) float64) float64 { + y, yp := 0.0, 1.0 + for n := 0.0; y != yp; n++ { + yp = y + y += f(n) + } + return y +} diff --git a/pkg/app/pipedv1/executor/analysis/mannwhitney/dist.go b/pkg/app/pipedv1/executor/analysis/mannwhitney/dist.go new file mode 100644 index 0000000000..68bd0282a3 --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/mannwhitney/dist.go @@ -0,0 +1,224 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mannwhitney + +import "math/rand" + +// A DistCommon is a statistical distribution. DistCommon is a base +// interface provided by both continuous and discrete distributions. +type DistCommon interface { + // CDF returns the cumulative probability Pr[X <= x]. + // + // For continuous distributions, the CDF is the integral of + // the PDF from -inf to x. + // + // For discrete distributions, the CDF is the sum of the PMF + // at all defined points from -inf to x, inclusive. Note that + // the CDF of a discrete distribution is defined for the whole + // real line (unlike the PMF) but has discontinuities where + // the PMF is non-zero. + // + // The CDF is a monotonically increasing function and has a + // domain of all real numbers. If the distribution has bounded + // support, it has a range of [0, 1]; otherwise it has a range + // of (0, 1). Finally, CDF(-inf)==0 and CDF(inf)==1. + CDF(x float64) float64 + + // Bounds returns reasonable bounds for this distribution's + // PDF/PMF and CDF. The total weight outside of these bounds + // should be approximately 0. + // + // For a discrete distribution, both bounds are integer + // multiples of Step(). 
+ // + // If this distribution has finite support, it returns exact + // bounds l, h such that CDF(l')=0 for all l' < l and + // CDF(h')=1 for all h' >= h. + Bounds() (float64, float64) +} + +// A Dist is a continuous statistical distribution. +type Dist interface { + DistCommon + + // PDF returns the value of the probability density function + // of this distribution at x. + PDF(x float64) float64 +} + +// A DiscreteDist is a discrete statistical distribution. +// +// Most discrete distributions are defined only at integral values of +// the random variable. However, some are defined at other intervals, +// so this interface takes a float64 value for the random variable. +// The probability mass function rounds down to the nearest defined +// point. Note that float64 values can exactly represent integer +// values between ±2**53, so this generally shouldn't be an issue for +// integer-valued distributions (likewise, for half-integer-valued +// distributions, float64 can exactly represent all values between +// ±2**52). +type DiscreteDist interface { + DistCommon + + // PMF returns the value of the probability mass function + // Pr[X = x'], where x' is x rounded down to the nearest + // defined point on the distribution. + // + // Note for implementers: for integer-valued distributions, + // round x using int(math.Floor(x)). Do not use int(x), since + // that truncates toward zero (unless all x <= 0 are handled + // the same). + PMF(x float64) float64 + + // Step returns s, where the distribution is defined for sℕ. + Step() float64 +} + +// TODO: Add a Support method for finite support distributions? Or +// maybe just another return value from Bounds indicating that the +// bounds are exact? + +// TODO: Plot method to return a pre-configured Plot object with +// reasonable bounds and an integral function? Have to distinguish +// PDF/CDF/InvCDF. Three methods? Argument? +// +// Doesn't have to be a method of Dist. 
Could be just a function that +// takes a Dist and uses Bounds. + +// InvCDF returns the inverse CDF function of the given distribution +// (also known as the quantile function or the percent point +// function). This is a function f such that f(dist.CDF(x)) == x. If +// dist.CDF is only weakly monotonic (that it, there are intervals +// over which it is constant) and y > 0, f returns the smallest x that +// satisfies this condition. In general, the inverse CDF is not +// well-defined for y==0, but for convenience if y==0, f returns the +// largest x that satisfies this condition. For distributions with +// infinite support both the largest and smallest x are -Inf; however, +// for distributions with finite support, this is the lower bound of +// the support. +// +// If y < 0 or y > 1, f returns NaN. +// +// If dist implements InvCDF(float64) float64, this returns that +// method. Otherwise, it returns a function that uses a generic +// numerical method to construct the inverse CDF at y by finding x +// such that dist.CDF(x) == y. This may have poor precision around +// points of discontinuity, including f(0) and f(1). +func InvCDF(dist DistCommon) func(y float64) (x float64) { + type invCDF interface { + InvCDF(float64) float64 + } + if dist, ok := dist.(invCDF); ok { + return dist.InvCDF + } + + // Otherwise, use a numerical algorithm. + // + // TODO: For discrete distributions, use the step size to + // inform this computation. + return func(y float64) (x float64) { + const almostInf = 1e100 + const xtol = 1e-16 + + if y < 0 || y > 1 { + return nan + } else if y == 0 { + l, _ := dist.Bounds() + if dist.CDF(l) == 0 { + // Finite support + return l + } else { + // Infinite support + return -inf + } + } else if y == 1 { + _, h := dist.Bounds() + if dist.CDF(h) == 1 { + // Finite support + return h + } else { + // Infinite support + return inf + } + } + + // Find loX, hiX for which cdf(loX) < y <= cdf(hiX). 
+ var loX, loY, hiX, hiY float64 + x1, y1 := 0.0, dist.CDF(0) + xdelta := 1.0 + if y1 < y { + hiX, hiY = x1, y1 + for hiY < y && hiX != inf { + loX, loY, hiX = hiX, hiY, hiX+xdelta + hiY = dist.CDF(hiX) + xdelta *= 2 + } + } else { + loX, loY = x1, y1 + for y <= loY && loX != -inf { + hiX, hiY, loX = loX, loY, loX-xdelta + loY = dist.CDF(loX) + xdelta *= 2 + } + } + if loX == -inf { + return loX + } else if hiX == inf { + return hiX + } + + // Use bisection on the interval to find the smallest + // x at which cdf(x) <= y. + _, x = bisectBool(func(x float64) bool { + return dist.CDF(x) < y + }, loX, hiX, xtol) + return + } +} + +// Rand returns a random number generator that draws from the given +// distribution. The returned generator takes an optional source of +// randomness; if this is nil, it uses the default global source. +// +// If dist implements Rand(*rand.Rand) float64, Rand returns that +// method. Otherwise, it returns a generic generator based on dist's +// inverse CDF (which may in turn use an efficient implementation or a +// generic numerical implementation; see InvCDF). +func Rand(dist DistCommon) func(*rand.Rand) float64 { + type distRand interface { + Rand(*rand.Rand) float64 + } + if dist, ok := dist.(distRand); ok { + return dist.Rand + } + + // Otherwise, use a generic algorithm. + inv := InvCDF(dist) + return func(r *rand.Rand) float64 { + var y float64 + for y == 0 { + if r == nil { + y = rand.Float64() + } else { + y = r.Float64() + } + } + return inv(y) + } +} diff --git a/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney.go b/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney.go new file mode 100644 index 0000000000..6dcf8a6f1d --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney.go @@ -0,0 +1,297 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mannwhitney + +import ( + "errors" + "math" + "sort" +) + +// A LocationHypothesis specifies the alternative hypothesis of a +// location test such as a t-test or a Mann-Whitney U-test. The +// default (zero) value is to test against the alternative hypothesis +// that they differ. +type LocationHypothesis int + +const ( + // LocationLess specifies the alternative hypothesis that the + // location of the first sample is less than the second. This + // is a one-tailed test. + LocationLess LocationHypothesis = -1 + + // LocationDiffers specifies the alternative hypothesis that + // the locations of the two samples are not equal. This is a + // two-tailed test. + LocationDiffers LocationHypothesis = 0 + + // LocationGreater specifies the alternative hypothesis that + // the location of the first sample is greater than the + // second. This is a one-tailed test. + LocationGreater LocationHypothesis = 1 +) + +var ( + inf = math.Inf(1) + nan = math.NaN() + + ErrSamplesEqual = errors.New("all samples are equal") + ErrSampleSize = errors.New("sample is too small") + ErrZeroVariance = errors.New("sample has zero variance") + ErrMismatchedSamples = errors.New("samples have different lengths") +) + +// A MannWhitneyUTestResult is the result of a Mann-Whitney U-test. +type MannWhitneyUTestResult struct { + // N1 and N2 are the sizes of the input samples. 
+ N1, N2 int + + // U is the value of the Mann-Whitney U statistic for this + // test, generalized by counting ties as 0.5. + // + // Given the Cartesian product of the two samples, this is the + // number of pairs in which the value from the first sample is + // greater than the value of the second, plus 0.5 times the + // number of pairs where the values from the two samples are + // equal. Hence, U is always an integer multiple of 0.5 (it is + // a whole integer if there are no ties) in the range [0, N1*N2]. + // + // U statistics always come in pairs, depending on which + // sample is "first". The mirror U for the other sample can be + // calculated as N1*N2 - U. + // + // There are many equivalent statistics with slightly + // different definitions. The Wilcoxon (1945) W statistic + // (generalized for ties) is U + (N1(N1+1))/2. It is also + // common to use 2U to eliminate the half steps and Smid + // (1956) uses N1*N2 - 2U to additionally center the + // distribution. + U float64 + + // AltHypothesis specifies the alternative hypothesis tested + // by this test against the null hypothesis that there is no + // difference in the locations of the samples. + AltHypothesis LocationHypothesis + + // P is the p-value of the Mann-Whitney test for the given + // null hypothesis. + P float64 +} + +// MannWhitneyExactLimit gives the largest sample size for which the +// exact U distribution will be used for the Mann-Whitney U-test. +// +// Using the exact distribution is necessary for small sample sizes +// because the distribution is highly irregular. However, computing +// the distribution for large sample sizes is both computationally +// expensive and unnecessary because it quickly approaches a normal +// approximation. Computing the distribution for two 50 value samples +// takes a few milliseconds on a 2014 laptop. 
+var MannWhitneyExactLimit = 50 + +// MannWhitneyTiesExactLimit gives the largest sample size for which +// the exact U distribution will be used for the Mann-Whitney U-test +// in the presence of ties. +// +// Computing this distribution is more expensive than computing the +// distribution without ties, so this is set lower. Computing this +// distribution for two 25 value samples takes about ten milliseconds +// on a 2014 laptop. +var MannWhitneyTiesExactLimit = 25 + +// MannWhitneyUTest performs a Mann-Whitney U-test [1,2] of the null +// hypothesis that two samples come from the same population against +// the alternative hypothesis that one sample tends to have larger or +// smaller values than the other. +// +// This is similar to a t-test, but unlike the t-test, the +// Mann-Whitney U-test is non-parametric (it does not assume a normal +// distribution). It has very slightly lower efficiency than the +// t-test on normal distributions. +// +// Computing the exact U distribution is expensive for large sample +// sizes, so this uses a normal approximation for sample sizes larger +// than MannWhitneyExactLimit if there are no ties or +// MannWhitneyTiesExactLimit if there are ties. This normal +// approximation uses both the tie correction and the continuity +// correction. +// +// This can fail with ErrSampleSize if either sample is empty or +// ErrSamplesEqual if all sample values are equal. +// +// This is also known as a Mann-Whitney-Wilcoxon test and is +// equivalent to the Wilcoxon rank-sum test, though the Wilcoxon +// rank-sum test differs in nomenclature. +// +// [1] Mann, Henry B.; Whitney, Donald R. (1947). "On a Test of +// Whether one of Two Random Variables is Stochastically Larger than +// the Other". Annals of Mathematical Statistics 18 (1): 50–60. +// +// [2] Klotz, J. H. (1966). "The Wilcoxon, Ties, and the Computer". +// Journal of the American Statistical Association 61 (315): 772-787. 
+func MannWhitneyUTest(x1, x2 []float64, alt LocationHypothesis) (*MannWhitneyUTestResult, error) { + n1, n2 := len(x1), len(x2) + if n1 == 0 || n2 == 0 { + return nil, ErrSampleSize + } + + // Compute the U statistic and tie vector T. + x1 = append([]float64(nil), x1...) + x2 = append([]float64(nil), x2...) + sort.Float64s(x1) + sort.Float64s(x2) + merged, labels := labeledMerge(x1, x2) + + R1 := 0.0 + T, hasTies := []int{}, false + for i := 0; i < len(merged); { + rank1, nx1, v1 := i+1, 0, merged[i] + // Consume samples that tie this sample (including itself). + for ; i < len(merged) && merged[i] == v1; i++ { + if labels[i] == 1 { + nx1++ + } + } + // Assign all tied samples the average rank of the + // samples, where merged[0] has rank 1. + if nx1 != 0 { + rank := float64(i+rank1) / 2 + R1 += rank * float64(nx1) + } + T = append(T, i-rank1+1) + if i > rank1 { + hasTies = true + } + } + U1 := R1 - float64(n1*(n1+1))/2 + + // Compute the smaller of U1 and U2 + U2 := float64(n1*n2) - U1 + Usmall := math.Min(U1, U2) + + var p float64 + if !hasTies && n1 <= MannWhitneyExactLimit && n2 <= MannWhitneyExactLimit || + hasTies && n1 <= MannWhitneyTiesExactLimit && n2 <= MannWhitneyTiesExactLimit { + // Use exact U distribution. U1 will be an integer. + if len(T) == 1 { + // All values are equal. Test is meaningless. + return nil, ErrSamplesEqual + } + + dist := UDist{N1: n1, N2: n2, T: T} + switch alt { + case LocationDiffers: + if U1 == U2 { + // The distribution is symmetric about + // Usmall. Since the distribution is + // discrete, the CDF is discontinuous + // and if simply double CDF(Usmall), + // we'll double count the + // (non-infinitesimal) probability + // mass at Usmall. What we want is + // just the integral of the whole CDF, + // which is 1. 
+ p = 1 + } else { + p = dist.CDF(Usmall) * 2 + } + + case LocationLess: + p = dist.CDF(U1) + + case LocationGreater: + p = 1 - dist.CDF(U1-1) + } + } else { + // Use normal approximation (with tie and continuity + // correction). + t := tieCorrection(T) + N := float64(n1 + n2) + μ_U := float64(n1*n2) / 2 + σ_U := math.Sqrt(float64(n1*n2) * ((N + 1) - t/(N*(N-1))) / 12) + if σ_U == 0 { + return nil, ErrSamplesEqual + } + numer := U1 - μ_U + // Perform continuity correction. + switch alt { + case LocationDiffers: + numer -= mathSign(numer) * 0.5 + case LocationLess: + numer += 0.5 + case LocationGreater: + numer -= 0.5 + } + z := numer / σ_U + switch alt { + case LocationDiffers: + p = 2 * math.Min(StdNormal.CDF(z), 1-StdNormal.CDF(z)) + case LocationLess: + p = StdNormal.CDF(z) + case LocationGreater: + p = 1 - StdNormal.CDF(z) + } + } + + return &MannWhitneyUTestResult{N1: n1, N2: n2, U: U1, + AltHypothesis: alt, P: p}, nil +} + +// labeledMerge merges sorted lists x1 and x2 into sorted list merged. +// labels[i] is 1 or 2 depending on whether merged[i] is a value from +// x1 or x2, respectively. +func labeledMerge(x1, x2 []float64) (merged []float64, labels []byte) { + merged = make([]float64, len(x1)+len(x2)) + labels = make([]byte, len(x1)+len(x2)) + + i, j, o := 0, 0, 0 + for i < len(x1) && j < len(x2) { + if x1[i] < x2[j] { + merged[o] = x1[i] + labels[o] = 1 + i++ + } else { + merged[o] = x2[j] + labels[o] = 2 + j++ + } + o++ + } + for ; i < len(x1); i++ { + merged[o] = x1[i] + labels[o] = 1 + o++ + } + for ; j < len(x2); j++ { + merged[o] = x2[j] + labels[o] = 2 + o++ + } + return +} + +// tieCorrection computes the tie correction factor Σ_j (t_j³ - t_j) +// where t_j is the number of ties in the j'th rank. 
+func tieCorrection(ties []int) float64 { + t := 0 + for _, tie := range ties { + t += tie*tie*tie - tie + } + return float64(t) +} diff --git a/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney_test.go b/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney_test.go new file mode 100644 index 0000000000..9546d0f0d5 --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/mannwhitney/mannwhitney_test.go @@ -0,0 +1,96 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package mannwhitney

import "testing"

// TestMannWhitneyUTest checks MannWhitneyUTest against hand-computed
// p-values for small samples (with and without ties) and against R's
// wilcox.test for large samples.
func TestMannWhitneyUTest(t *testing.T) {
	t.Parallel()

	// check compares a single expected result against a computed one.
	check := func(want, got *MannWhitneyUTestResult) {
		if want.N1 != got.N1 || want.N2 != got.N2 ||
			!aeq(t, want.U, got.U) ||
			want.AltHypothesis != got.AltHypothesis ||
			!aeq(t, want.P, got.P) {
			t.Errorf("want %+v, got %+v", want, got)
		}
	}
	// check3 runs the test under all three alternative hypotheses.
	check3 := func(x1, x2 []float64, U float64, pless, pdiff, pgreater float64) {
		want := &MannWhitneyUTestResult{N1: len(x1), N2: len(x2), U: U}

		want.AltHypothesis = LocationLess
		want.P = pless
		got, _ := MannWhitneyUTest(x1, x2, want.AltHypothesis)
		check(want, got)

		want.AltHypothesis = LocationDiffers
		want.P = pdiff
		got, _ = MannWhitneyUTest(x1, x2, want.AltHypothesis)
		check(want, got)

		want.AltHypothesis = LocationGreater
		want.P = pgreater
		got, _ = MannWhitneyUTest(x1, x2, want.AltHypothesis)
		check(want, got)
	}

	s1 := []float64{2, 1, 3, 5}
	s2 := []float64{12, 11, 13, 15}
	s3 := []float64{0, 4, 6, 7} // Interleaved with s1, but no ties
	s4 := []float64{2, 2, 2, 2}
	s5 := []float64{1, 1, 1, 1, 1}

	// Small sample, no ties
	check3(s1, s2, 0, 0.014285714285714289, 0.028571428571428577, 1)
	check3(s2, s1, 16, 1, 0.028571428571428577, 0.014285714285714289)
	check3(s1, s3, 5, 0.24285714285714288, 0.485714285714285770, 0.8285714285714285)

	// Small sample, ties
	// TODO: Check these against some other implementation.
	check3(s1, s1, 8, 0.6285714285714286, 1, 0.6285714285714286)
	check3(s1, s4, 10, 0.8571428571428571, 0.7142857142857143, 0.3571428571428571)
	check3(s1, s5, 17.5, 1, 0, 0.04761904761904767)

	// Identical samples must be rejected outright.
	r, err := MannWhitneyUTest(s4, s4, LocationDiffers)
	if err != ErrSamplesEqual {
		t.Errorf("want ErrSamplesEqual, got %+v, %+v", r, err)
	}

	// Large samples.
	l1 := make([]float64, 500)
	for i := range l1 {
		l1[i] = float64(i * 2)
	}
	l2 := make([]float64, 600)
	for i := range l2 {
		l2[i] = float64(i*2 - 41)
	}
	l3 := append([]float64{}, l2...)
	for i := 0; i < 30; i++ {
		l3[i] = l1[i]
	}
	// For comparing with R's wilcox.test:
	//   l1 <- seq(0, 499)*2
	//   l2 <- seq(0,599)*2-41
	//   l3 <- l2; for (i in 1:30) { l3[i] = l1[i] }

	check3(l1, l2, 135250, 0.0024667680407086112, 0.0049335360814172224, 0.9975346930458906)
	check3(l1, l1, 125000, 0.5000436801680628, 1, 0.5000436801680628)
	check3(l1, l3, 134845, 0.0019351907119808942, 0.0038703814239617884, 0.9980659818257166)
}

// File: pkg/app/pipedv1/executor/analysis/mannwhitney/mathx.go

// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mannwhitney

import "math"

// mathSign returns the sign of x: -1 if x < 0, 0 if x == 0, 1 if x > 0.
// If x is NaN, it returns NaN.
func mathSign(x float64) float64 {
	if x == 0 {
		return 0
	} else if x < 0 {
		return -1
	} else if x > 0 {
		return 1
	}
	// None of the comparisons above hold only for NaN.
	return nan
}

const smallFactLimit = 20 // 20! => 62 bits

// smallFact[n] is n! for n <= smallFactLimit; filled in by init.
var smallFact [smallFactLimit + 1]int64

func init() {
	smallFact[0] = 1
	fact := int64(1)
	for n := int64(1); n <= smallFactLimit; n++ {
		fact *= n
		smallFact[n] = fact
	}
}

// mathChoose returns the binomial coefficient of n and k.
func mathChoose(n, k int) float64 {
	if k == 0 || k == n {
		return 1
	}
	if k < 0 || n < k {
		return 0
	}
	if n <= smallFactLimit { // Implies k <= smallFactLimit
		// It's faster to do several integer multiplications
		// than it is to do an extra integer division.
		// Remarkably, this is also faster than pre-computing
		// Pascal's triangle (presumably because this is very
		// cache efficient).
		numer := int64(1)
		for n1 := int64(n - (k - 1)); n1 <= int64(n); n1++ {
			numer *= n1
		}
		denom := smallFact[k]
		return float64(numer / denom)
	}

	// Too large for exact integer arithmetic; go through logs.
	return math.Exp(lchoose(n, k))
}

// mathLchoose returns math.Log(mathChoose(n, k)).
func mathLchoose(n, k int) float64 {
	if k == 0 || k == n {
		return 0
	}
	if k < 0 || n < k {
		return math.NaN()
	}
	return lchoose(n, k)
}

// lchoose computes log(nCk) via log-gamma; callers must ensure
// 0 < k < n.
func lchoose(n, k int) float64 {
	a, _ := math.Lgamma(float64(n + 1))
	b, _ := math.Lgamma(float64(k + 1))
	c, _ := math.Lgamma(float64(n - k + 1))
	return a - b - c
}

// File: pkg/app/pipedv1/executor/analysis/mannwhitney/normaldist.go

// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NormalDist is a normal (Gaussian) distribution with mean Mu and
// standard deviation Sigma.
type NormalDist struct {
	Mu, Sigma float64
}

// StdNormal is the standard normal distribution (Mu = 0, Sigma = 1).
var StdNormal = NormalDist{0, 1}

// invSqrt2Pi is 1/sqrt(2*pi).
const invSqrt2Pi = 0.39894228040143267793994605993438186847585863116493465766592583

// PDF returns the probability density of n at x.
func (n NormalDist) PDF(x float64) float64 {
	d := x - n.Mu
	return math.Exp(-d*d/(2*n.Sigma*n.Sigma)) * invSqrt2Pi / n.Sigma
}

// pdfEach evaluates the PDF at every point of xs and returns the
// results as a new slice.
func (n NormalDist) pdfEach(xs []float64) []float64 {
	out := make([]float64, len(xs))
	if n.Mu == 0 && n.Sigma == 1 {
		// Fast path for the standard normal.
		for i, x := range xs {
			out[i] = math.Exp(-x*x/2) * invSqrt2Pi
		}
		return out
	}
	// Hoist the parts of the density that do not depend on x.
	a := -1 / (2 * n.Sigma * n.Sigma)
	b := invSqrt2Pi / n.Sigma
	for i, x := range xs {
		z := x - n.Mu
		out[i] = math.Exp(z*z*a) * b
	}
	return out
}

// CDF returns P(X <= x) for X drawn from n, computed via the
// complementary error function.
func (n NormalDist) CDF(x float64) float64 {
	return math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2)) / 2
}

// cdfEach evaluates the CDF at every point of xs and returns the
// results as a new slice.
func (n NormalDist) cdfEach(xs []float64) []float64 {
	out := make([]float64, len(xs))
	inv := 1 / (n.Sigma * math.Sqrt2)
	for i, x := range xs {
		out[i] = math.Erfc(-(x-n.Mu)*inv) / 2
	}
	return out
}

// InvCDF returns the x for which CDF(x) = p (the quantile function).
// It returns NaN for p outside [0, 1] and ±Inf at the endpoints.
func (n NormalDist) InvCDF(p float64) (x float64) {
	// Based on Peter John Acklam's inverse normal CDF algorithm:
	// a rational initial approximation in three regions, then one
	// Halley refinement step against the exact CDF.
	const (
		a1 = -3.969683028665376e+01
		a2 = 2.209460984245205e+02
		a3 = -2.759285104469687e+02
		a4 = 1.383577518672690e+02
		a5 = -3.066479806614716e+01
		a6 = 2.506628277459239e+00

		b1 = -5.447609879822406e+01
		b2 = 1.615858368580409e+02
		b3 = -1.556989798598866e+02
		b4 = 6.680131188771972e+01
		b5 = -1.328068155288572e+01

		c1 = -7.784894002430293e-03
		c2 = -3.223964580411365e-01
		c3 = -2.400758277161838e+00
		c4 = -2.549732539343734e+00
		c5 = 4.374664141464968e+00
		c6 = 2.938163982698783e+00

		d1 = 7.784695709041462e-03
		d2 = 3.224671290700398e-01
		d3 = 2.445134137142996e+00
		d4 = 3.754408661907416e+00

		plow  = 0.02425
		phigh = 1 - plow
	)

	switch {
	case p < 0 || p > 1:
		return math.NaN()
	case p == 0:
		return math.Inf(-1)
	case p == 1:
		return math.Inf(1)
	}

	switch {
	case p < plow:
		// Rational approximation for the lower region.
		q := math.Sqrt(-2 * math.Log(p))
		x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
	case phigh < p:
		// Rational approximation for the upper region.
		q := math.Sqrt(-2 * math.Log(1 - p))
		x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
	default:
		// Rational approximation for the central region.
		q := p - 0.5
		r := q * q
		x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
			(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
	}

	// One refinement step against the exact CDF.
	e := 0.5*math.Erfc(-x/math.Sqrt2) - p
	u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
	x = x - u/(1+x*u/2)

	// Scale and shift from the standard normal.
	return x*n.Sigma + n.Mu
}

// Rand draws a sample from n using r, or the global generator when r
// is nil.
func (n NormalDist) Rand(r *rand.Rand) float64 {
	var z float64
	if r == nil {
		z = rand.NormFloat64()
	} else {
		z = r.NormFloat64()
	}
	return z*n.Sigma + n.Mu
}

// Bounds returns a range covering three standard deviations around
// the mean.
func (n NormalDist) Bounds() (float64, float64) {
	const stddevs = 3
	return n.Mu - stddevs*n.Sigma, n.Mu + stddevs*n.Sigma
}

// File: pkg/app/pipedv1/executor/analysis/mannwhitney/udist.go

// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// A UDist is the discrete probability distribution of the
// Mann-Whitney U statistic for a pair of samples of sizes N1 and N2.
//
// The details of computing this distribution with no ties can be
// found in Mann, Henry B.; Whitney, Donald R. (1947). "On a Test of
// Whether one of Two Random Variables is Stochastically Larger than
// the Other". Annals of Mathematical Statistics 18 (1): 50–60.
// Computing this distribution in the presence of ties is described in
// Klotz, J. H. (1966). "The Wilcoxon, Ties, and the Computer".
// Journal of the American Statistical Association 61 (315): 772-787
// and Cheung, Ying Kuen; Klotz, Jerome H. (1997). "The Mann Whitney
// Wilcoxon Distribution Using Linked Lists". Statistica Sinica 7:
// 805-813 (the former paper contains details that are glossed over in
// the latter paper but has mathematical typesetting issues, so it's
// easiest to get the context from the former paper and the details
// from the latter).
type UDist struct {
	N1, N2 int

	// T is the count of the number of ties at each rank in the
	// input distributions. T may be nil, in which case it is
	// assumed there are no ties (which is equivalent to an M+N
	// slice of 1s). It must be the case that Sum(T) == M+N.
	T []int
}

// hasTies returns true if d has any tied samples.
func (d UDist) hasTies() bool {
	for _, t := range d.T {
		if t > 1 {
			return true
		}
	}
	return false
}

// p returns the p_{d.N1,d.N2} function defined by Mann, Whitney 1947
// for values of U from 0 up to and including the U argument.
//
// This algorithm runs in Θ(N1*N2*U) = O(N1²N2²) time and is quite
// fast for small values of N1 and N2. However, it does not handle ties.
func (d UDist) p(U int) []float64 {
	// This is a dynamic programming implementation of the
	// recursive recurrence definition given by Mann and Whitney:
	//
	//   p_{n,m}(U) = (n * p_{n-1,m}(U-m) + m * p_{n,m-1}(U)) / (n+m)
	//   p_{n,m}(U) = 0                           if U < 0
	//   p_{0,m}(U) = p{n,0}(U) = 1 / nCr(m+n, n) if U = 0
	//              = 0                           if U > 0
	//
	// (Note that there is a typo in the original paper. The first
	// recursive application of p should be for U-m, not U-M.)
	//
	// Since p{n,m} only depends on p{n-1,m} and p{n,m-1}, we only
	// need to store one "plane" of the three dimensional space at
	// a time.
	//
	// Furthermore, p_{n,m} = p_{m,n}, so we only construct values
	// for n <= m and obtain the rest through symmetry.
	//
	// We organize the computed values of p as followed:
	//
	//       n →   N
	//     m *
	//     ↓ * *
	//       * * *
	//       * * * *
	//       * * * *
	//     M * * * *
	//
	// where each * is a slice indexed by U. The code below
	// computes these left-to-right, top-to-bottom, so it only
	// stores one row of this matrix at a time. Furthermore,
	// computing an element in a given U slice only depends on the
	// same and smaller values of U, so we can overwrite the U
	// slice we're computing in place as long as we start with the
	// largest value of U. Finally, even though the recurrence
	// depends on (n,m) above the diagonal and we use symmetry to
	// mirror those across the diagonal to (m,n), the mirrored
	// indexes are always available in the current row, so this
	// mirroring does not interfere with our ability to recycle
	// state.

	N, M := d.N1, d.N2
	if N > M {
		N, M = M, N
	}

	memo := make([][]float64, N+1)
	for n := range memo {
		memo[n] = make([]float64, U+1)
	}

	for m := 0; m <= M; m++ {
		// Compute p_{0,m}. This is zero except for U=0.
		memo[0][0] = 1

		// Compute the remainder of this row.
		nlim := N
		if m < nlim {
			nlim = m
		}
		for n := 1; n <= nlim; n++ {
			lp := memo[n-1] // p_{n-1,m}
			var rp []float64
			if n <= m-1 {
				rp = memo[n] // p_{n,m-1}
			} else {
				rp = memo[m-1] // p{m-1,n} and m==n
			}

			// For a given n,m, U is at most n*m.
			//
			// TODO: Actually, it's at most ⌈n*m/2⌉, but
			// then we need to use more complex symmetries
			// in the inner loop below.
			ulim := n * m
			if U < ulim {
				ulim = U
			}

			out := memo[n] // p_{n,m}
			nplusm := float64(n + m)
			for U1 := ulim; U1 >= 0; U1-- {
				l := 0.0
				if U1-m >= 0 {
					l = float64(n) * lp[U1-m]
				}
				r := float64(m) * rp[U1]
				out[U1] = (l + r) / nplusm
			}
		}
	}
	return memo[N]
}

// ukey identifies one memoization entry in makeUmemo's tables.
type ukey struct {
	n1   int // size of first sample
	twoU int // 2*U statistic for this permutation
}

// This computes the cumulative counts of the Mann-Whitney U
// distribution in the presence of ties using the computation from
// Cheung, Ying Kuen; Klotz, Jerome H. (1997). "The Mann Whitney
// Wilcoxon Distribution Using Linked Lists". Statistica Sinica 7:
// 805-813, with much guidance from appendix L of Klotz, A
// Computational Approach to Statistics.
//
// makeUmemo constructs a table memo[K][ukey{n1, 2*U}], where K is the
// number of ranks (up to len(t)), n1 is the size of the first sample
// (up to the n1 argument), and U is the U statistic (up to the
// argument twoU/2). The value of an entry in the memo table is the
// number of permutations of a sample of size n1 in a ranking with tie
// vector t[:K] having a U statistic <= U.
func makeUmemo(twoU, n1 int, t []int) []map[ukey]float64 {
	// Another candidate for a fast implementation is van de Wiel,
	// "The split-up algorithm: a fast symbolic method for
	// computing p-values of distribution-free statistics". This
	// is what's used by R's coin package. It's a comparatively
	// recent publication, so it's presumably faster (or perhaps
	// just more general) than previous techniques, but I can't
	// get my hands on the paper.
	//
	// TODO: ~40% of this function's time is spent in mapassign on
	// the assignment lines in the two loops and another ~20% in
	// map access and iteration. Improving map behavior or
	// replacing the maps altogether with some other constant-time
	// structure could double performance.
	//
	// TODO: The worst case for this function is when there are
	// few ties. Yet the best case overall is when there are *no*
	// ties. Can we get the best of both worlds? Use the fast
	// algorithm for the most part when there are few ties and mix
	// in the general algorithm just where we need it? That's
	// certainly possible for sub-problems where t[:k] has no
	// ties, but that doesn't help if t[0] has a tie but nothing
	// else does. Is it possible to rearrange the ranks without
	// messing up our computation of the U statistic for
	// sub-problems?

	K := len(t)

	// Compute a coefficients. The a slice is indexed by k (a[0]
	// is unused).
	a := make([]int, K+1)
	a[1] = t[0]
	for k := 2; k <= K; k++ {
		a[k] = a[k-1] + t[k-2] + t[k-1]
	}

	// Create the memo table for the counts function, A. The A
	// slice is indexed by k (A[0] is unused).
	//
	// In "The Mann Whitney Distribution Using Linked Lists", they
	// use linked lists (*gasp*) for this, but within each K it's
	// really just a memoization table, so it's faster to use a
	// map. The outer structure is a slice indexed by k because we
	// need to find all memo entries with certain values of k.
	//
	// TODO: The n1 and twoU values in the ukeys follow strict
	// patterns. For each K value, the n1 values are every integer
	// between two bounds. For each (K, n1) value, the twoU values
	// are every integer multiple of a certain base between two
	// bounds. It might be worth turning these into directly
	// indexible slices.
	A := make([]map[ukey]float64, K+1)
	A[K] = map[ukey]float64{ukey{n1: n1, twoU: twoU}: 0}

	// Compute memo table (k, n1, twoU) triples from high K values
	// to low K values. This drives the recurrence relation
	// downward to figure out all of the needed argument triples.
	//
	// TODO: Is it possible to generate this table bottom-up? If
	// so, this could be a pure dynamic programming algorithm and
	// we could discard the K dimension. We could at least store
	// the inputs in a more compact representation that replaces
	// the twoU dimension with an interval and a step size (as
	// suggested by Cheung, Klotz, not that they make it at all
	// clear *why* they're suggesting this).
	tsum := sumint(t) // always ∑ t[0:k]
	for k := K - 1; k >= 2; k-- {
		tsum -= t[k]
		A[k] = make(map[ukey]float64)

		// Construct A[k] from A[k+1].
		for A_kplus1 := range A[k+1] {
			rkLow := maxint(0, A_kplus1.n1-tsum)
			rkHigh := minint(A_kplus1.n1, t[k])
			for rk := rkLow; rk <= rkHigh; rk++ {
				twoU_k := A_kplus1.twoU - rk*(a[k+1]-2*A_kplus1.n1+rk)
				n1_k := A_kplus1.n1 - rk
				if twoUmin(n1_k, t[:k], a) <= twoU_k && twoU_k <= twoUmax(n1_k, t[:k], a) {
					key := ukey{n1: n1_k, twoU: twoU_k}
					A[k][key] = 0
				}
			}
		}
	}

	// Fill counts in memo table from low K values to high K
	// values. This unwinds the recurrence relation.

	// Start with K==2 base case.
	//
	// TODO: Later computations depend on these, but these don't
	// depend on anything (including each other), so if K==2, we
	// can skip the memo table altogether.
	if K < 2 {
		panic("K < 2")
	}
	N_2 := t[0] + t[1]
	for A_2i := range A[2] {
		Asum := 0.0
		r2Low := maxint(0, A_2i.n1-t[0])
		r2High := (A_2i.twoU - A_2i.n1*(t[0]-A_2i.n1)) / N_2
		for r2 := r2Low; r2 <= r2High; r2++ {
			Asum += mathChoose(t[0], A_2i.n1-r2) *
				mathChoose(t[1], r2)
		}
		A[2][A_2i] = Asum
	}

	// Derive counts for the rest of the memo table.
	tsum = t[0] // always ∑ t[0:k-1]
	for k := 3; k <= K; k++ {
		tsum += t[k-2]

		// Compute A[k] counts from A[k-1] counts.
		for A_ki := range A[k] {
			Asum := 0.0
			rkLow := maxint(0, A_ki.n1-tsum)
			rkHigh := minint(A_ki.n1, t[k-1])
			for rk := rkLow; rk <= rkHigh; rk++ {
				twoU_kminus1 := A_ki.twoU - rk*(a[k]-2*A_ki.n1+rk)
				n1_kminus1 := A_ki.n1 - rk
				x, ok := A[k-1][ukey{n1: n1_kminus1, twoU: twoU_kminus1}]
				if !ok && twoUmax(n1_kminus1, t[:k-1], a) < twoU_kminus1 {
					x = mathChoose(tsum, n1_kminus1)
				}
				Asum += x * mathChoose(t[k-1], rk)
			}
			A[k][A_ki] = Asum
		}
	}

	return A
}

// twoUmin returns the smallest value of 2*U achievable by a first
// sample of size n1 under tie vector t with coefficients a (as
// computed by makeUmemo). It is used to prune unreachable memo keys.
func twoUmin(n1 int, t, a []int) int {
	K := len(t)
	twoU := -n1 * n1
	n1_k := n1
	for k := 1; k <= K; k++ {
		twoU_k := minint(n1_k, t[k-1])
		twoU += twoU_k * a[k]
		n1_k -= twoU_k
	}
	return twoU
}

// twoUmax is the counterpart of twoUmin: the largest achievable value
// of 2*U, obtained by filling ranks from the top instead of the
// bottom.
func twoUmax(n1 int, t, a []int) int {
	K := len(t)
	twoU := -n1 * n1
	n1_k := n1
	for k := K; k > 0; k-- {
		twoU_k := minint(n1_k, t[k-1])
		twoU += twoU_k * a[k]
		n1_k -= twoU_k
	}
	return twoU
}

// PMF returns the probability mass of the U statistic at U. With
// ties, U may take half-integral values.
func (d UDist) PMF(U float64) float64 {
	if U < 0 || U >= 0.5+float64(d.N1*d.N2) {
		return 0
	}

	if d.hasTies() {
		// makeUmemo computes the CDF directly. Take its
		// difference to get the PMF.
		p1, ok1 := makeUmemo(int(2*U)-1, d.N1, d.T)[len(d.T)][ukey{d.N1, int(2*U) - 1}]
		p2, ok2 := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]
		if !ok1 || !ok2 {
			panic("makeUmemo did not return expected memoization table")
		}
		return (p2 - p1) / mathChoose(d.N1+d.N2, d.N1)
	}

	// There are no ties. Use the fast algorithm. U must be integral.
	Ui := int(math.Floor(U))
	// TODO: Use symmetry to minimize U
	return d.p(Ui)[Ui]
}

// CDF returns P(X <= U) for the U statistic distribution d.
func (d UDist) CDF(U float64) float64 {
	if U < 0 {
		return 0
	} else if U >= float64(d.N1*d.N2) {
		return 1
	}

	if d.hasTies() {
		// TODO: Minimize U?
		p, ok := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]
		if !ok {
			panic("makeUmemo did not return expected memoization table")
		}
		return p / mathChoose(d.N1+d.N2, d.N1)
	}

	// There are no ties. Use the fast algorithm. U must be integral.
	Ui := int(math.Floor(U))
	// The distribution is symmetric around U = m * n / 2. Sum up
	// whichever tail is smaller.
	flip := Ui >= (d.N1*d.N2+1)/2
	if flip {
		Ui = d.N1*d.N2 - Ui - 1
	}
	pdfs := d.p(Ui)
	p := 0.0
	for _, pdf := range pdfs[:Ui+1] {
		p += pdf
	}
	if flip {
		p = 1 - p
	}
	return p
}

// Step returns the spacing of d's support: 1/2, because with ties the
// U statistic can take half-integral values (see the 2*U keys above).
func (d UDist) Step() float64 {
	return 0.5
}

// Bounds returns the smallest and largest possible values of U.
func (d UDist) Bounds() (float64, float64) {
	// TODO: More precise bounds when there are ties.
	return 0, float64(d.N1 * d.N2)
}

// File: pkg/app/pipedv1/executor/analysis/mannwhitney/udist_test.go

// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mannwhitney

import (
	"fmt"
	"math"
	"testing"
)

// aeqTable reports whether a and b are element-wise equal to within
// 1e-6 (the precision printed by "%f").
func aeqTable(t *testing.T, a, b [][]float64) bool {
	t.Helper()

	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if len(a[i]) != len(b[i]) {
			return false
		}
		for j := range a[i] {
			// "%f" precision
			if math.Abs(a[i][j]-b[i][j]) >= 0.000001 {
				return false
			}
		}
	}
	return true
}

// U distribution for N=3 up to U=5.
var udist3 = [][]float64{
	//  m=1       2         3
	{0.250000, 0.100000, 0.050000}, // U=0
	{0.500000, 0.200000, 0.100000}, // U=1
	{0.750000, 0.400000, 0.200000}, // U=2
	{1.000000, 0.600000, 0.350000}, // U=3
	{1.000000, 0.800000, 0.500000}, // U=4
	{1.000000, 0.900000, 0.650000}, // U=5
}

// U distribution for N=5 up to U=5.
var udist5 = [][]float64{
	//  m=1       2         3         4         5
	{0.166667, 0.047619, 0.017857, 0.007937, 0.003968}, // U=0
	{0.333333, 0.095238, 0.035714, 0.015873, 0.007937}, // U=1
	{0.500000, 0.190476, 0.071429, 0.031746, 0.015873}, // U=2
	{0.666667, 0.285714, 0.125000, 0.055556, 0.027778}, // U=3
	{0.833333, 0.428571, 0.196429, 0.095238, 0.047619}, // U=4
	{1.000000, 0.571429, 0.285714, 0.142857, 0.075397}, // U=5
}

// TestUDist compares the tie-free CDF against the tables published in
// Mann, Whitney (1947) for N=3 and N=5.
func TestUDist(t *testing.T) {
	t.Parallel()

	makeTable := func(n int) [][]float64 {
		out := make([][]float64, 6)
		for U := 0; U < 6; U++ {
			out[U] = make([]float64, n)
			for m := 1; m <= n; m++ {
				out[U][m-1] = UDist{N1: m, N2: n}.CDF(float64(U))
			}
		}
		return out
	}
	fmtTable := func(a [][]float64) string {
		out := fmt.Sprintf("%8s", "m=")
		for m := 1; m <= len(a[0]); m++ {
			out += fmt.Sprintf("%9d", m)
		}
		out += "\n"

		for U, row := range a {
			out += fmt.Sprintf("U=%-6d", U)
			for m := 1; m <= len(a[0]); m++ {
				out += fmt.Sprintf(" %f", row[m-1])
			}
			out += "\n"
		}
		return out
	}

	// Compare against tables given in Mann, Whitney (1947).
	got3 := makeTable(3)
	if !aeqTable(t, got3, udist3) {
		t.Errorf("For n=3, want:\n%sgot:\n%s", fmtTable(udist3), fmtTable(got3))
	}

	got5 := makeTable(5)
	if !aeqTable(t, got5, udist5) {
		t.Errorf("For n=5, want:\n%sgot:\n%s", fmtTable(udist5), fmtTable(got5))
	}
}

func BenchmarkUDist(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// R uses the exact distribution up to N=50.
		// N*M/2=1250 is the hardest point to get the CDF for.
		UDist{N1: 50, N2: 50}.CDF(1250)
	}
}

// TestUDistTies checks the tied-sample CDF against the tables in
// Klotz (1966) and against the slow reference implementation
// udistRef.
func TestUDistTies(t *testing.T) {
	t.Parallel()

	makeTable := func(m, N int, tVar []int, minx, maxx float64) [][]float64 {
		out := [][]float64{}
		dist := UDist{N1: m, N2: N - m, T: tVar}
		for x := minx; x <= maxx; x += 0.5 {
			// Convert x from uQt' to uQv'.
			U := x - float64(m*m)/2
			P := dist.CDF(U)
			if len(out) == 0 || !aeq(t, out[len(out)-1][1], P) {
				out = append(out, []float64{x, P})
			}
		}
		return out
	}
	fmtTable := func(table [][]float64) string {
		out := ""
		for _, row := range table {
			out += fmt.Sprintf("%5.1f %f\n", row[0], row[1])
		}
		return out
	}

	// Compare against Table 1 from Klotz (1966).
	got := makeTable(5, 10, []int{1, 1, 2, 1, 1, 2, 1, 1}, 12.5, 19.5)
	want := [][]float64{
		{12.5, 0.003968}, {13.5, 0.007937},
		{15.0, 0.023810}, {16.5, 0.047619},
		{17.5, 0.071429}, {18.0, 0.087302},
		{19.0, 0.134921}, {19.5, 0.138889},
	}
	if !aeqTable(t, got, want) {
		t.Errorf("Want:\n%sgot:\n%s", fmtTable(want), fmtTable(got))
	}

	got = makeTable(10, 21, []int{6, 5, 4, 3, 2, 1}, 52, 87)
	want = [][]float64{
		{52.0, 0.000014}, {56.5, 0.000128},
		{57.5, 0.000145}, {60.0, 0.000230},
		{61.0, 0.000400}, {62.0, 0.000740},
		{62.5, 0.000797}, {64.0, 0.000825},
		{64.5, 0.001165}, {65.5, 0.001477},
		{66.5, 0.002498}, {67.0, 0.002725},
		{67.5, 0.002895}, {68.0, 0.003150},
		{68.5, 0.003263}, {69.0, 0.003518},
		{69.5, 0.003603}, {70.0, 0.005648},
		{70.5, 0.005818}, {71.0, 0.006626},
		{71.5, 0.006796}, {72.0, 0.008157},
		{72.5, 0.009688}, {73.0, 0.009801},
		{73.5, 0.010430}, {74.0, 0.011111},
		{74.5, 0.014230}, {75.0, 0.014612},
		{75.5, 0.017249}, {76.0, 0.018307},
		{76.5, 0.020178}, {77.0, 0.022270},
		{77.5, 0.023189}, {78.0, 0.026931},
		{78.5, 0.028207}, {79.0, 0.029979},
		{79.5, 0.030931}, {80.0, 0.038969},
		{80.5, 0.043063}, {81.0, 0.044262},
		{81.5, 0.046389}, {82.0, 0.049581},
		{82.5, 0.056300}, {83.0, 0.058027},
		{83.5, 0.063669}, {84.0, 0.067454},
		{84.5, 0.074122}, {85.0, 0.077425},
		{85.5, 0.083498}, {86.0, 0.094079},
		{86.5, 0.096693}, {87.0, 0.101132},
	}
	if !aeqTable(t, got, want) {
		t.Errorf("Want:\n%sgot:\n%s", fmtTable(want), fmtTable(got))
	}

	got = makeTable(8, 16, []int{2, 2, 2, 2, 2, 2, 2, 2}, 32, 54)
	want = [][]float64{
		{32.0, 0.000078}, {34.0, 0.000389},
		{36.0, 0.001088}, {38.0, 0.002642},
		{40.0, 0.005905}, {42.0, 0.011500},
		{44.0, 0.021057}, {46.0, 0.035664},
		{48.0, 0.057187}, {50.0, 0.086713},
		{52.0, 0.126263}, {54.0, 0.175369},
	}
	if !aeqTable(t, got, want) {
		t.Errorf("Want:\n%sgot:\n%s", fmtTable(want), fmtTable(got))
	}

	// Check remaining tables from Klotz against the reference
	// implementation.
	checkRef := func(n1 int, tie []int) {
		wantPMF1, wantCDF1 := udistRef(t, n1, tie)

		dist := UDist{N1: n1, N2: sumint(tie) - n1, T: tie}
		gotPMF, wantPMF := [][]float64{}, [][]float64{}
		gotCDF, wantCDF := [][]float64{}, [][]float64{}
		N := sumint(tie)
		for U := 0.0; U <= float64(n1*(N-n1)); U += 0.5 {
			gotPMF = append(gotPMF, []float64{U, dist.PMF(U)})
			gotCDF = append(gotCDF, []float64{U, dist.CDF(U)})
			wantPMF = append(wantPMF, []float64{U, wantPMF1[int(U*2)]})
			wantCDF = append(wantCDF, []float64{U, wantCDF1[int(U*2)]})
		}
		if !aeqTable(t, wantPMF, gotPMF) {
			t.Errorf("For PMF of n1=%v, t=%v, want:\n%sgot:\n%s", n1, tie, fmtTable(wantPMF), fmtTable(gotPMF))
		}
		if !aeqTable(t, wantCDF, gotCDF) {
			t.Errorf("For CDF of n1=%v, t=%v, want:\n%sgot:\n%s", n1, tie, fmtTable(wantCDF), fmtTable(gotCDF))
		}
	}
	checkRef(5, []int{1, 1, 2, 1, 1, 2, 1, 1})
	checkRef(5, []int{1, 1, 2, 1, 1, 1, 2, 1})
	checkRef(5, []int{1, 3, 1, 2, 1, 1, 1})
	checkRef(8, []int{1, 2, 1, 1, 1, 1, 2, 2, 1, 2})
	checkRef(12, []int{3, 3, 4, 3, 4, 5})
	checkRef(10, []int{1, 2, 3, 4, 5, 6})
}

func BenchmarkUDistTies(b *testing.B) {
	// Worst case: just one tie.
	n := 20
	t := make([]int, 2*n-1)
	for i := range t {
		t[i] = 1
	}
	t[0] = 2

	for i := 0; i < b.N; i++ {
		UDist{N1: n, N2: n, T: t}.CDF(float64(n*n) / 2)
	}
}

// XTestPrintUmemo is intentionally disabled (the X prefix keeps "go
// test" from running it); rename it to TestPrintUmemo to dump the
// memo table from Cheung, Klotz for manual inspection.
func XTestPrintUmemo(t *testing.T) {
	// Reproduce table from Cheung, Klotz.
	ties := []int{4, 5, 3, 4, 6}
	printUmemo(t, makeUmemo(80, 10, ties), ties)
}

// udistRef computes the PMF and CDF of the U distribution for two
// samples of sizes n1 and sum(t)-n1 with tie vector t. The returned
// pmf and cdf are indexed by 2*U.
//
// This uses the "graphical method" of Klotz (1966). It is very slow
// (Θ(∏ (t[i]+1)) = Ω(2^|t|)), but very correct, and hence useful as a
// reference for testing faster implementations.
func udistRef(t *testing.T, n1 int, tVar []int) (pmf, cdf []float64) {
	t.Helper()
	// Enumerate all u vectors for which 0 <= ui <= tVari. Count
	// the number of permutations of two samples of sizes n1 and
	// sum(t)-n1 with tie vector t and accumulate these counts by
	// their U statistics in count[2*U].
	counts := make([]int, 1+2*n1*(sumint(tVar)-n1))

	u := make([]int, len(tVar))
	u[0] = -1 // Get enumeration started.
enumu:
	for {
		// Compute the next u vector.
		u[0]++
		for i := 0; i < len(u) && u[i] > tVar[i]; i++ {
			if i == len(u)-1 {
				// All u vectors have been enumerated.
				break enumu
			}
			// Carry.
			u[i+1]++
			u[i] = 0
		}

		// Is this a legal u vector?
		if sumint(u) != n1 {
			// Klotz (1966) has a method for directly
			// enumerating legal u vectors, but the point
			// of this is to be correct, not fast.
			continue
		}

		// Compute 2*U statistic for this u vector.
		twoU, vsum := 0, 0
		for i, ui := range u {
			vi := tVar[i] - ui
			// U = U + vsum*ui + ui*vi/2
			twoU += 2*vsum*ui + ui*vi
			vsum += vi
		}

		// Compute Π choose(tVari, ui). This is the number of
		// ways of permuting the input sample under u.
		prod := 1
		for i, ui := range u {
			prod *= int(mathChoose(tVar[i], ui) + 0.5)
		}

		// Accumulate the permutations on this u path.
		counts[twoU] += prod

		if false {
			// Print a table in the form of Klotz's
			// "direct enumeration" example.
			//
			// Convert 2U = 2UQV' to UQt' used in Klotz
			// examples.
			UQt := float64(twoU)/2 + float64(n1*n1)/2
			fmt.Printf("%+v %f %-2d\n", u, UQt, prod)
		}
	}

	// Convert counts into probabilities for PMF and CDF.
	pmf = make([]float64, len(counts))
	cdf = make([]float64, len(counts))
	total := int(mathChoose(sumint(tVar), n1) + 0.5)
	for i, count := range counts {
		pmf[i] = float64(count) / float64(total)
		if i > 0 {
			cdf[i] = cdf[i-1]
		}
		cdf[i] += pmf[i]
	}
	return
}

// printUmemo prints the output of makeUmemo for debugging.
func printUmemo(t *testing.T, A []map[ukey]float64, tVar []int) {
	t.Helper()

	fmt.Printf("K\tn1\t2*U\tpr\n")
	for K := len(A) - 1; K >= 0; K-- {
		for i, pr := range A[K] {
			_, ref := udistRef(t, i.n1, tVar[:K])
			fmt.Printf("%v\t%v\t%v\t%v\t%v\n", K, i.n1, i.twoU, pr, ref[i.twoU])
		}
	}
}

// File: pkg/app/pipedv1/executor/analysis/mannwhitney/util_test.go

// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mannwhitney + +import ( + "fmt" + "math" + "sort" + "strings" + "testing" +) + +func testDiscreteCDF(t *testing.T, name string, dist DiscreteDist) { + t.Parallel() + + // Build the expected CDF out of the PMF. + l, h := dist.Bounds() + s := dist.Step() + want := map[float64]float64{l - 0.1: 0, h: 1} + sum := 0.0 + for x := l; x < h; x += s { + sum += dist.PMF(x) + want[x] = sum + want[x+s/2] = sum + } + + testFunc(t, name, dist.CDF, want) +} + +func testInvCDF(t *testing.T, dist Dist, bounded bool) { + t.Parallel() + + inv := InvCDF(dist) + name := fmt.Sprintf("InvCDF(%+v)", dist) + cdfName := fmt.Sprintf("CDF(%+v)", dist) + + // Test bounds. + vals := map[float64]float64{-0.01: nan, 1.01: nan} + if !bounded { + vals[0] = -inf + vals[1] = inf + } + testFunc(t, name, inv, vals) + + if bounded { + lo, hi := inv(0), inv(1) + vals := map[float64]float64{ + lo - 0.01: 0, lo: 0, + hi: 1, hi + 0.01: 1, + } + testFunc(t, cdfName, dist.CDF, vals) + if got := dist.CDF(lo + 0.01); !(got > 0) { + t.Errorf("%s(0)=%v, but %s(%v)=0", name, lo, cdfName, lo+0.01) + } + if got := dist.CDF(hi - 0.01); !(got < 1) { + t.Errorf("%s(1)=%v, but %s(%v)=1", name, hi, cdfName, hi-0.01) + } + } + + // Test points between. + vals = map[float64]float64{} + for _, p := range vecLinspace(t, 0, 1, 11) { + if p == 0 || p == 1 { + continue + } + x := inv(p) + vals[x] = x + } + testFunc(t, fmt.Sprintf("InvCDF(CDF(%+v))", dist), + func(x float64) float64 { + return inv(dist.CDF(x)) + }, + vals) +} + +// aeq returns true if expect and got are equal to 8 significant +// figures (1 part in 100 million). 
+func aeq(t *testing.T, expect, got float64) bool { + t.Helper() + + if expect < 0 && got < 0 { + expect, got = -expect, -got + } + return expect*0.99999999 <= got && got*0.99999999 <= expect +} + +func testFunc(t *testing.T, name string, f func(float64) float64, vals map[float64]float64) { + t.Parallel() + + xs := make([]float64, 0, len(vals)) + for x := range vals { + xs = append(xs, x) + } + sort.Float64s(xs) + + for _, x := range xs { + want, got := vals[x], f(x) + if math.IsNaN(want) && math.IsNaN(got) || aeq(t, want, got) { + continue + } + var label string + if strings.Contains(name, "%v") { + label = fmt.Sprintf(name, x) + } else { + label = fmt.Sprintf("%s(%v)", name, x) + } + t.Errorf("want %s=%v, got %v", label, want, got) + } +} + +// vecLinspace returns num values spaced evenly between lo and hi, +// inclusive. If num is 1, this returns an array consisting of lo. +func vecLinspace(t *testing.T, lo, hi float64, num int) []float64 { + t.Helper() + + res := make([]float64, num) + if num == 1 { + res[0] = lo + return res + } + for i := 0; i < num; i++ { + res[i] = lo + float64(i)*(hi-lo)/float64(num-1) + } + return res +} diff --git a/pkg/app/pipedv1/executor/analysis/metrics_analyzer.go b/pkg/app/pipedv1/executor/analysis/metrics_analyzer.go new file mode 100644 index 0000000000..e0c7486673 --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/metrics_analyzer.go @@ -0,0 +1,464 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"text/template"
+	"time"
+
+	"go.uber.org/zap"
+
+	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics"
+	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/analysisresultstore"
+	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
+	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/analysis/mannwhitney"
+	"github.com/pipe-cd/pipecd/pkg/config"
+)
+
+const (
+	canaryVariantName   = "canary"
+	baselineVariantName = "baseline"
+	primaryVariantName  = "primary"
+)
+
+type metricsAnalyzer struct {
+	id                  string
+	cfg                 config.AnalysisMetrics
+	stageStartTime      time.Time
+	provider            metrics.Provider
+	analysisResultStore executor.AnalysisResultStore
+	// Application-specific arguments used when rendering the query.
+	argsTemplate argsTemplate
+	logger       *zap.Logger
+	logPersister executor.LogPersister
+}
+
+func newMetricsAnalyzer(id string, cfg config.AnalysisMetrics, stageStartTime time.Time, provider metrics.Provider, analysisResultStore executor.AnalysisResultStore, argsTemplate argsTemplate, logger *zap.Logger, logPersister executor.LogPersister) *metricsAnalyzer {
+	return &metricsAnalyzer{
+		id:                  id,
+		cfg:                 cfg,
+		stageStartTime:      stageStartTime,
+		provider:            provider,
+		analysisResultStore: analysisResultStore,
+		argsTemplate:        argsTemplate,
+		logPersister:        logPersister,
+		logger: logger.With(
+			zap.String("analyzer-id", id),
+		),
+	}
+}
+
+// run starts an analysis which runs the query at the given interval, until the context is done.
+// It returns an error when the number of failures exceeds the failureLimit.
+func (a *metricsAnalyzer) run(ctx context.Context) error { + ticker := time.NewTicker(a.cfg.Interval.Duration()) + defer ticker.Stop() + + failureCount := 0 + for { + select { + case <-ticker.C: + var ( + expected bool + err error + ) + switch a.cfg.Strategy { + case config.AnalysisStrategyThreshold: + expected, err = a.analyzeWithThreshold(ctx) + case config.AnalysisStrategyPrevious: + var firstDeploy bool + expected, firstDeploy, err = a.analyzeWithPrevious(ctx) + if firstDeploy { + a.logPersister.Infof("[%s] PreviousAnalysis cannot be executed because this seems to be the first deployment, so it is considered as a success", a.id) + return nil + } + case config.AnalysisStrategyCanaryBaseline: + expected, err = a.analyzeWithCanaryBaseline(ctx) + case config.AnalysisStrategyCanaryPrimary: + expected, err = a.analyzeWithCanaryPrimary(ctx) + default: + return fmt.Errorf("unknown strategy %q given", a.cfg.Strategy) + } + // Ignore parent's context deadline exceeded error, and return immediately. + if errors.Is(err, context.DeadlineExceeded) && ctx.Err() == context.DeadlineExceeded { + return nil + } + if errors.Is(err, metrics.ErrNoDataFound) && a.cfg.SkipOnNoData { + a.logPersister.Infof("[%s] The query result evaluation was skipped because \"skipOnNoData\" is true though no data returned. Reason: %v", a.id, err) + continue + } + if err != nil { + a.logPersister.Errorf("[%s] Unexpected error: %v", a.id, err) + } + if expected { + a.logPersister.Successf("[%s] The query result is expected one", a.id) + continue + } + failureCount++ + if failureCount > a.cfg.FailureLimit { + return fmt.Errorf("analysis '%s' failed because the failure number exceeded the failure limit (%d)", a.id, a.cfg.FailureLimit) + } + case <-ctx.Done(): + return nil + } + } +} + +// analyzeWithThreshold returns false if any data point is out of the prediction range. +// Return an error if the evaluation could not be executed normally. 
+func (a *metricsAnalyzer) analyzeWithThreshold(ctx context.Context) (bool, error) {
+	if err := a.cfg.Expected.Validate(); err != nil {
+		return false, fmt.Errorf("\"expected\" is required to analyze with the THRESHOLD strategy")
+	}
+
+	now := time.Now()
+	queryRange := metrics.QueryRange{
+		From: now.Add(-a.cfg.Interval.Duration()),
+		To:   now,
+	}
+
+	a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, a.cfg.Query, queryRange)
+	points, err := a.provider.QueryPoints(ctx, a.cfg.Query, queryRange)
+	if err != nil {
+		return false, fmt.Errorf("failed to run query: %w", err)
+	}
+	if len(points) == 0 {
+		a.logPersister.Infof("[%s] This analysis stage will be skipped since there was no data point to compare", a.id)
+		return true, nil
+	}
+
+	var outlier metrics.DataPoint
+	expected := true
+	for i := range points {
+		if a.cfg.Expected.InRange(points[i].Value) {
+			continue
+		}
+		expected = false
+		outlier = points[i]
+		break
+	}
+	if !expected {
+		a.logPersister.Errorf("[%s] Failed because it found a data point (%s) that is outside the expected range (%s). Performed query: %q", a.id, &outlier, &a.cfg.Expected, a.cfg.Query)
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// analyzeWithPrevious returns false if primary deviates in the specified direction compared to the previous deployment.
+// Return an error if the evaluation could not be executed normally.
+// elapsedTime is used to compare metrics at the same point in time after the analysis has started.
+func (a *metricsAnalyzer) analyzeWithPrevious(ctx context.Context) (expected, firstDeploy bool, err error) { + now := time.Now() + queryRange := metrics.QueryRange{ + From: now.Add(-a.cfg.Interval.Duration()), + To: now, + } + + a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, a.cfg.Query, queryRange) + points, err := a.provider.QueryPoints(ctx, a.cfg.Query, queryRange) + if err != nil { + return false, false, fmt.Errorf("failed to run query: %w: performed query: %q", err, a.cfg.Query) + } + pointsCount := len(points) + a.logPersister.Infof("[%s] Got %d data points for current Primary from the query: %q", a.id, pointsCount, a.cfg.Query) + values := make([]float64, 0, pointsCount) + for i := range points { + values = append(values, points[i].Value) + } + + prevMetadata, err := a.analysisResultStore.GetLatestAnalysisResult(ctx) + if errors.Is(err, analysisresultstore.ErrNotFound) { + return false, true, nil + } + if err != nil { + return false, false, fmt.Errorf("failed to fetch the most recent successful analysis metadata: %w", err) + } + // Compare it with the previous metrics when the same amount of time as now has passed since the start of the stage. 
+ elapsedTime := now.Sub(a.stageStartTime) + prevTo := time.Unix(prevMetadata.StartTime, 0).Add(elapsedTime) + prevFrom := prevTo.Add(-a.cfg.Interval.Duration()) + prevQueryRange := metrics.QueryRange{ + From: prevFrom, + To: prevTo, + } + + a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, a.cfg.Query, prevQueryRange) + prevPoints, err := a.provider.QueryPoints(ctx, a.cfg.Query, prevQueryRange) + if err != nil { + return false, false, fmt.Errorf("failed to run query to fetch metrics for the previous deployment: %w: performed query: %q", err, a.cfg.Query) + } + prevPointsCount := len(prevPoints) + a.logPersister.Infof("[%s] Got %d data points for previous Primary from the query: %q", a.id, prevPointsCount, a.cfg.Query) + prevValues := make([]float64, 0, prevPointsCount) + for i := range prevPoints { + prevValues = append(prevValues, prevPoints[i].Value) + } + expected, err = a.compare(values, prevValues, a.cfg.Deviation) + if err != nil { + a.logPersister.Errorf("[%s] Failed to compare data points: %v", a.id, err) + a.logPersister.Infof("[%s] Performed query: %q", a.id, a.cfg.Query) + return false, false, err + } + if !expected { + a.logPersister.Errorf("[%s] The difference between Current Primary and Previous one is statistically significant", a.id) + a.logPersister.Infof("[%s] Performed query range for current Primary: %q", a.id, &queryRange) + a.logPersister.Infof("[%s] Performed query range for previous Primary: %q", a.id, &prevQueryRange) + a.logPersister.Infof("[%s] Performed query: %q", a.id, a.cfg.Query) + a.logPersister.Infof("[%s] Current data points acquired:", a.id) + for i := range points { + a.logPersister.Infof("[%s] %s", a.id, &points[i]) + } + a.logPersister.Infof("[%s] Previous data points acquired:", a.id) + for i := range prevPoints { + a.logPersister.Infof("[%s] %s", a.id, &prevPoints[i]) + } + return false, false, nil + } + return true, false, nil +} + +// analyzeWithCanaryBaseline returns false if canary deviates in the specified 
direction compared to baseline. +// Return an error if the evaluation could not be executed normally. +func (a *metricsAnalyzer) analyzeWithCanaryBaseline(ctx context.Context) (bool, error) { + now := time.Now() + queryRange := metrics.QueryRange{ + From: now.Add(-a.cfg.Interval.Duration()), + To: now, + } + canaryQuery, err := a.renderQuery(a.cfg.Query, a.cfg.CanaryArgs, canaryVariantName) + if err != nil { + return false, fmt.Errorf("failed to render query template for Canary: %w", err) + } + baselineQuery, err := a.renderQuery(a.cfg.Query, a.cfg.BaselineArgs, baselineVariantName) + if err != nil { + return false, fmt.Errorf("failed to render query template for Baseline: %w", err) + } + + // Fetch data points from Canary. + a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, canaryQuery, queryRange) + canaryPoints, err := a.provider.QueryPoints(ctx, canaryQuery, queryRange) + if err != nil { + return false, fmt.Errorf("failed to run query to fetch metrics for the Canary variant: %w: query range: %s: performed query: %q", err, &queryRange, canaryQuery) + } + canaryPointsCount := len(canaryPoints) + a.logPersister.Infof("[%s] Got %d data points for Canary from the query: %q", a.id, canaryPointsCount, canaryQuery) + canaryValues := make([]float64, 0, canaryPointsCount) + for i := range canaryPoints { + canaryValues = append(canaryValues, canaryPoints[i].Value) + } + + // Fetch data points from Baseline. 
+ a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, baselineQuery, queryRange) + baselinePoints, err := a.provider.QueryPoints(ctx, baselineQuery, queryRange) + if err != nil { + return false, fmt.Errorf("failed to run query to fetch metrics for the Baseline variant: %w: query range: %s: performed query: %q", err, &queryRange, baselineQuery) + } + baselinePointsCount := len(baselinePoints) + a.logPersister.Infof("[%s] Got %d data points for Baseline from the query: %q", a.id, baselinePointsCount, baselineQuery) + baselineValues := make([]float64, 0, baselinePointsCount) + for i := range baselinePoints { + baselineValues = append(baselineValues, baselinePoints[i].Value) + } + + expected, err := a.compare(canaryValues, baselineValues, a.cfg.Deviation) + if err != nil { + a.logPersister.Errorf("[%s] Failed to compare data points: %v", a.id, err) + a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery) + a.logPersister.Infof("[%s] Performed query for Baseline: %q", a.id, baselineQuery) + return false, err + } + if !expected { + a.logPersister.Errorf("[%s] The difference between Canary and Baseline is statistically significant", a.id) + a.logPersister.Infof("[%s] Performed query range: %q", a.id, &queryRange) + a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery) + a.logPersister.Infof("[%s] Performed query for Baseline: %q", a.id, baselineQuery) + a.logPersister.Infof("[%s] Canary data points acquired:", a.id) + for i := range canaryPoints { + a.logPersister.Infof("[%s] %s", a.id, &canaryPoints[i]) + } + a.logPersister.Infof("[%s] Baseline data points acquired:", a.id) + for i := range baselinePoints { + a.logPersister.Infof("[%s] %s", a.id, &baselinePoints[i]) + } + return false, nil + } + return true, nil +} + +// analyzeWithCanaryPrimary returns false if canary deviates in the specified direction compared to primary. +// Return an error if the evaluation could not be executed normally. 
+func (a *metricsAnalyzer) analyzeWithCanaryPrimary(ctx context.Context) (bool, error) { + now := time.Now() + queryRange := metrics.QueryRange{ + From: now.Add(-a.cfg.Interval.Duration()), + To: now, + } + canaryQuery, err := a.renderQuery(a.cfg.Query, a.cfg.CanaryArgs, canaryVariantName) + if err != nil { + return false, fmt.Errorf("failed to render query template for Canary: %w", err) + } + primaryQuery, err := a.renderQuery(a.cfg.Query, a.cfg.PrimaryArgs, primaryVariantName) + if err != nil { + return false, fmt.Errorf("failed to render query template for Primary: %w", err) + } + + a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, canaryQuery, queryRange) + canaryPoints, err := a.provider.QueryPoints(ctx, canaryQuery, queryRange) + if err != nil { + return false, fmt.Errorf("failed to run query to fetch metrics for the Canary variant: %w: performed query: %q", err, canaryQuery) + } + canaryPointsCount := len(canaryPoints) + a.logPersister.Infof("[%s] Got %d data points for Canary from the query: %q", a.id, canaryPointsCount, canaryQuery) + canaryValues := make([]float64, 0, canaryPointsCount) + for i := range canaryPoints { + canaryValues = append(canaryValues, canaryPoints[i].Value) + } + + a.logPersister.Infof("[%s] Run query: %q, in range: %v", a.id, primaryQuery, queryRange) + primaryPoints, err := a.provider.QueryPoints(ctx, primaryQuery, queryRange) + if err != nil { + return false, fmt.Errorf("failed to run query to fetch metrics for the Primary variant: %w: performed query: %q", err, primaryQuery) + } + primaryPointsCount := len(primaryPoints) + a.logPersister.Infof("[%s] Got %d data points for Primary from the query: %q", a.id, primaryPointsCount, primaryQuery) + primaryValues := make([]float64, 0, primaryPointsCount) + for i := range primaryPoints { + primaryValues = append(primaryValues, primaryPoints[i].Value) + } + expected, err := a.compare(canaryValues, primaryValues, a.cfg.Deviation) + if err != nil { + a.logPersister.Errorf("[%s] 
Failed to compare data points: %v", a.id, err) + a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery) + a.logPersister.Infof("[%s] Performed query for Primary: %q", a.id, primaryQuery) + return false, err + } + if !expected { + a.logPersister.Errorf("[%s] The difference between Canary and Primary is statistically significant", a.id) + a.logPersister.Infof("[%s] Performed query range: %q", a.id, &queryRange) + a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery) + a.logPersister.Infof("[%s] Performed query for Primary: %q", a.id, primaryQuery) + a.logPersister.Infof("[%s] Canary data points acquired:", a.id) + for i := range canaryPoints { + a.logPersister.Infof("[%s] %s", a.id, &canaryPoints[i]) + } + a.logPersister.Infof("[%s] Primary data points acquired:", a.id) + for i := range primaryPoints { + a.logPersister.Infof("[%s] %s", a.id, &primaryPoints[i]) + } + return false, nil + } + return true, nil +} + +// compare compares the given two samples using Mann-Whitney U test. +// Considered as failure if it deviates in the specified direction as the third argument. +// If both of the point values is empty, this returns true. 
+func (a *metricsAnalyzer) compare(experiment, control []float64, deviation string) (acceptable bool, err error) { + if len(experiment) == 0 && len(control) == 0 { + a.logPersister.Infof("[%s] The analysis stage will be skipped since there was no data point to compare", a.id) + return true, nil + } + if len(experiment) == 0 { + return false, fmt.Errorf("no data points of Experiment found") + } + if len(control) == 0 { + return false, fmt.Errorf("no data points of Control found") + } + var alternativeHypothesis mannwhitney.LocationHypothesis + switch deviation { + case config.AnalysisDeviationEither: + alternativeHypothesis = mannwhitney.LocationDiffers + case config.AnalysisDeviationLow: + alternativeHypothesis = mannwhitney.LocationLess + case config.AnalysisDeviationHigh: + alternativeHypothesis = mannwhitney.LocationGreater + default: + return false, fmt.Errorf("unknown deviation %q given", deviation) + } + res, err := mannwhitney.MannWhitneyUTest(experiment, control, alternativeHypothesis) + if errors.Is(err, mannwhitney.ErrSamplesEqual) { + // All samples are exact the same. + return true, nil + } + if err != nil { + return false, fmt.Errorf("failed to perform the Mann-Whitney U test: %w", err) + } + + // alpha is the significance level. Typically 5% is used. + const alpha = 0.05 + // If the p-value is greater than the significance level, + // we cannot say that the distributions in the two groups differed significantly. + // See: https://support.minitab.com/en-us/minitab-express/1/help-and-how-to/basic-statistics/inference/how-to/two-samples/mann-whitney-test/interpret-the-results/key-results/ + if res.P > alpha { + return true, nil + } + return false, nil +} + +// argsTemplate is a collection of available template arguments. +// NOTE: Changing its fields will force users to change the template definition. +type argsTemplate struct { + // The args that are automatically populated. 
+ App appArgs + K8s k8sArgs + Variant variantArgs + + // User-defined custom args. + VariantCustomArgs map[string]string + AppCustomArgs map[string]string +} + +// appArgs allows application-specific data to be embedded in the query. +type appArgs struct { + Name string + Env string +} + +type k8sArgs struct { + Namespace string +} + +// variantArgs allows variant-specific data to be embedded in the query. +type variantArgs struct { + // One of "primary", "canary", or "baseline" will be populated. + Name string +} + +// renderQuery applies the given variant args to the query template. +func (a *metricsAnalyzer) renderQuery(queryTemplate string, variantCustomArgs map[string]string, variant string) (string, error) { + args := argsTemplate{ + Variant: variantArgs{Name: variant}, + VariantCustomArgs: variantCustomArgs, + App: a.argsTemplate.App, + K8s: a.argsTemplate.K8s, + AppCustomArgs: a.argsTemplate.AppCustomArgs, + } + + t, err := template.New("AnalysisTemplate").Parse(queryTemplate) + if err != nil { + return "", fmt.Errorf("failed to parse query template: %w", err) + } + + b := new(bytes.Buffer) + if err := t.Execute(b, args); err != nil { + return "", fmt.Errorf("failed to apply template: %w", err) + } + return b.String(), err +} diff --git a/pkg/app/pipedv1/executor/analysis/metrics_analyzer_test.go b/pkg/app/pipedv1/executor/analysis/metrics_analyzer_test.go new file mode 100644 index 0000000000..3472310afa --- /dev/null +++ b/pkg/app/pipedv1/executor/analysis/metrics_analyzer_test.go @@ -0,0 +1,393 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/analysisprovider/metrics" + "github.com/pipe-cd/pipecd/pkg/config" +) + +type fakeMetricsProvider struct { + points []metrics.DataPoint + err error +} + +func (f *fakeMetricsProvider) Type() string { return "" } +func (f *fakeMetricsProvider) QueryPoints(_ context.Context, _ string, _ metrics.QueryRange) ([]metrics.DataPoint, error) { + return f.points, f.err +} + +type fakeLogPersister struct{} + +func (l *fakeLogPersister) Write(_ []byte) (int, error) { return 0, nil } +func (l *fakeLogPersister) Info(_ string) {} +func (l *fakeLogPersister) Infof(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Success(_ string) {} +func (l *fakeLogPersister) Successf(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Error(_ string) {} +func (l *fakeLogPersister) Errorf(_ string, _ ...interface{}) {} + +func floatToPointer(n float64) *float64 { return &n } + +func Test_metricsAnalyzer_analyzeWithThreshold(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + metricsAnalyzer *metricsAnalyzer + want bool + wantErr bool + }{ + { + name: "no expected field given", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + }, + want: false, + wantErr: true, + }, + { + name: "query failed", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: 
config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + Expected: config.AnalysisExpected{Max: floatToPointer(1)}, + }, + provider: &fakeMetricsProvider{ + err: fmt.Errorf("query failed"), + }, + }, + want: false, + wantErr: true, + }, + { + name: "there is a point outside the expected range", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + Expected: config.AnalysisExpected{Max: floatToPointer(1)}, + }, + provider: &fakeMetricsProvider{ + points: []metrics.DataPoint{ + {Value: 0.9}, + {Value: 1.1}, + {Value: 0.8}, + }, + }, + }, + want: false, + wantErr: false, + }, + { + name: "all points are expected ones", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + Expected: config.AnalysisExpected{Max: floatToPointer(1)}, + }, + provider: &fakeMetricsProvider{ + points: []metrics.DataPoint{ + {Value: 0.9}, + {Value: 0.9}, + {Value: 0.8}, + }, + }, + }, + want: true, + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tc.metricsAnalyzer.logger = zap.NewNop() + tc.metricsAnalyzer.logPersister = &fakeLogPersister{} + got, err := tc.metricsAnalyzer.analyzeWithThreshold(context.Background()) + assert.Equal(t, tc.wantErr, err != nil) + assert.Equal(t, tc.want, got) + }) + } +} + +func Test_metricsAnalyzer_compare(t *testing.T) { + t.Parallel() + + type args struct { + experiment []float64 + control []float64 + deviation string + } + testcases := []struct { + name string + metricsAnalyzer *metricsAnalyzer + args args + wantExpected bool + wantErr bool + }{ + { + name: "empty data points given", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{}, + control: 
[]float64{0.1, 0.2, 0.3, 0.4, 0.5}, + deviation: "EITHER", + }, + wantExpected: false, + wantErr: true, + }, + { + name: "no significance", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + control: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + deviation: "EITHER", + }, + wantExpected: true, + wantErr: false, + }, + { + name: "deviation on high direction as expected", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{10.1, 10.2, 10.3, 10.4, 10.5}, + control: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + deviation: "LOW", + }, + wantExpected: true, + wantErr: false, + }, + { + name: "deviation on low direction as expected", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + control: []float64{10.1, 10.2, 10.3, 10.4, 10.5}, + deviation: "HIGH", + }, + wantExpected: true, + wantErr: false, + }, + { + name: "deviation on high direction as unexpected", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{10.1, 10.2, 10.3, 10.4, 10.5}, + control: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + deviation: "HIGH", + }, + wantExpected: false, + wantErr: false, + }, + { + name: "deviation on low 
direction as unexpected", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5}, + control: []float64{10.1, 10.2, 10.3, 10.4, 10.5}, + deviation: "LOW", + }, + wantExpected: false, + wantErr: false, + }, + { + name: "deviation as unexpected", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: []float64{0.1, 0.2, 5.3, 0.2, 0.5}, + control: []float64{0.1, 0.1, 0.1, 0.1, 0.1}, + deviation: "EITHER", + }, + wantExpected: false, + wantErr: false, + }, + { + name: "the data points is empty", + metricsAnalyzer: &metricsAnalyzer{ + id: "id", + cfg: config.AnalysisMetrics{ + Provider: "provider", + Query: "query", + }, + provider: &fakeMetricsProvider{}, + logger: zap.NewNop(), + logPersister: &fakeLogPersister{}, + }, + args: args{ + experiment: nil, + control: nil, + deviation: "EITHER", + }, + wantExpected: true, + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got, err := tc.metricsAnalyzer.compare(tc.args.experiment, tc.args.control, tc.args.deviation) + assert.Equal(t, tc.wantErr, err != nil) + assert.Equal(t, tc.wantExpected, got) + }) + } +} + +func Test_metricsAnalyzer_renderQuery(t *testing.T) { + t.Parallel() + + type args struct { + queryTemplate string + variantCustomArgs map[string]string + variant string + } + testcases := []struct { + name string + metricsAnalyzer *metricsAnalyzer + args args + want string + wantErr bool + }{ + { + name: "using only variant built in args", + args: args{ + queryTemplate: `variant="{{ .Variant.Name }}"`, + variant: "canary", + }, + 
metricsAnalyzer: &metricsAnalyzer{}, + want: `variant="canary"`, + wantErr: false, + }, + { + name: "using variant and app built in args", + args: args{ + queryTemplate: `variant="{{ .Variant.Name }}", app="{{ .App.Name }}"`, + variant: "canary", + }, + metricsAnalyzer: &metricsAnalyzer{ + argsTemplate: argsTemplate{ + App: appArgs{ + Name: "app-1", + }, + }, + }, + want: `variant="canary", app="app-1"`, + wantErr: false, + }, + { + name: "using variant and app built in and custom args", + args: args{ + queryTemplate: `variant="{{ .Variant.Name }}", app="{{ .App.Name }}", pod="{{ .VariantCustomArgs.pod }}", id="{{ .AppCustomArgs.id }}"`, + variantCustomArgs: map[string]string{"pod": "1234"}, + variant: "canary", + }, + metricsAnalyzer: &metricsAnalyzer{ + argsTemplate: argsTemplate{ + App: appArgs{ + Name: "app-1", + }, + AppCustomArgs: map[string]string{"id": "xxxx"}, + }, + }, + want: `variant="canary", app="app-1", pod="1234", id="xxxx"`, + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got, err := tc.metricsAnalyzer.renderQuery(tc.args.queryTemplate, tc.args.variantCustomArgs, tc.args.variant) + assert.Equal(t, tc.wantErr, err != nil) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/executor/cloudrun/cloudrun.go b/pkg/app/pipedv1/executor/cloudrun/cloudrun.go new file mode 100644 index 0000000000..b58ce40309 --- /dev/null +++ b/pkg/app/pipedv1/executor/cloudrun/cloudrun.go @@ -0,0 +1,255 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &deployExecutor{ + Input: in, + } + } + r.Register(model.StageCloudRunSync, f) + r.Register(model.StageCloudRunPromote, f) + + r.RegisterRollback(model.RollbackKind_Rollback_CLOUDRUN, func(in executor.Input) executor.Executor { + return &rollbackExecutor{ + Input: in, + } + }) +} + +func loadServiceManifest(in *executor.Input, serviceManifestFile string, ds *deploysource.DeploySource) (provider.ServiceManifest, bool) { + in.LogPersister.Infof("Loading service manifest at commit %s", ds.Revision) + + sm, err := provider.LoadServiceManifest(ds.AppDir, serviceManifestFile) + if err != nil { + in.LogPersister.Errorf("Failed to load service manifest (%v)", err) + return provider.ServiceManifest{}, false + } + + in.LogPersister.Infof("Successfully loaded the service manifest at commit %s", ds.Revision) + return sm, true +} + +func findPlatformProvider(in *executor.Input) (name string, cfg *config.PlatformProviderCloudRunConfig, found bool) { + name = in.Application.PlatformProvider + if name == "" { + in.LogPersister.Error("Missing the PlatformProvider name in the application configuration") + return + } + + cp, ok := in.PipedConfig.FindPlatformProvider(name, model.ApplicationKind_CLOUDRUN) + if !ok { + in.LogPersister.Errorf("The specified platform provider %q was not 
found in piped configuration", name) + return + } + + cfg = cp.CloudRunConfig + found = true + return +} + +func decideRevisionName(sm provider.ServiceManifest, commit string, lp executor.LogPersister) (revision string, ok bool) { + var err error + revision, err = provider.DecideRevisionName(sm, commit) + if err != nil { + lp.Errorf("Unable to decide revision name for the commit %s (%v)", commit, err) + return + } + + ok = true + return +} + +func configureServiceManifest(sm provider.ServiceManifest, revision string, traffics []provider.RevisionTraffic, lp executor.LogPersister) bool { + if revision != "" { + if err := sm.SetRevision(revision); err != nil { + lp.Errorf("Unable to set revision name to service manifest (%v)", err) + return false + } + } + + if err := sm.UpdateTraffic(traffics); err != nil { + lp.Errorf("Unable to configure traffic percentages to service manifest (%v)", err) + return false + } + + lp.Info("Successfully prepared service manifest with traffic percentages as below:") + for _, t := range traffics { + lp.Infof(" %s: %d", t.RevisionName, t.Percent) + } + + return true +} + +func apply(ctx context.Context, client provider.Client, sm provider.ServiceManifest, lp executor.LogPersister) bool { + lp.Info("Start applying the service manifest") + + _, err := client.Update(ctx, sm) + if err == nil { + lp.Infof("Successfully updated the service %s", sm.Name) + return true + } + + if err != provider.ErrServiceNotFound { + lp.Errorf("Failed to update the service %s (%v)", sm.Name, err) + return false + } + + lp.Infof("Service %s was not found, a new service will be created", sm.Name) + + if _, err := client.Create(ctx, sm); err != nil { + lp.Errorf("Failed to create the service %s (%v)", sm.Name, err) + return false + } + + lp.Infof("Successfully created the service %s", sm.Name) + return true +} + +func waitRevisionReady(ctx context.Context, client provider.Client, revisionName string, retryDuration, retryTimeout time.Duration, lp 
executor.LogPersister) error { + shouldCheckConditions := map[string]struct{}{ + "Active": struct{}{}, + "Ready": struct{}{}, + "ConfigurationsReady": struct{}{}, + "RoutesReady": struct{}{}, + "ContainerHealthy": struct{}{}, + "ResourcesAvailable": struct{}{}, + } + mustPassConditions := map[string]struct{}{ + "Ready": struct{}{}, + "Active": struct{}{}, + } + + doCheck := func() (bool, error) { + rvs, err := client.GetRevision(ctx, revisionName) + // NotFound should be a retriable error. + if err == provider.ErrRevisionNotFound { + return true, err + } + if err != nil { + return false, err + } + + var ( + trueConds = make(map[string]struct{}, 0) + falseConds = make([]string, 0, len(shouldCheckConditions)) + unknownConds = make([]string, 0, len(shouldCheckConditions)) + ) + if rvs.Status != nil { + for _, cond := range rvs.Status.Conditions { + if _, ok := shouldCheckConditions[cond.Type]; !ok { + continue + } + switch cond.Status { + case "True": + trueConds[cond.Type] = struct{}{} + case "False": + falseConds = append(falseConds, cond.Message) + default: + unknownConds = append(unknownConds, cond.Message) + } + } + } + + if len(falseConds) > 0 { + return false, fmt.Errorf("%s", strings.Join(falseConds, "\n")) + } + if len(unknownConds) > 0 { + return true, fmt.Errorf("%s", strings.Join(unknownConds, "\n")) + } + for k := range mustPassConditions { + if _, ok := trueConds[k]; !ok { + return true, fmt.Errorf("could not check status field %q", k) + } + } + return false, nil + } + + start := time.Now() + for { + retry, err := doCheck() + if !retry { + if err != nil { + lp.Errorf("Revision %s was not ready: %v", revisionName, err) + return err + } + lp.Infof("Revision %s is ready to receive traffic", revisionName) + return nil + } + + if time.Since(start) > retryTimeout { + lp.Errorf("Revision %s was not ready: %v", revisionName, err) + return err + } + + lp.Infof("Revision %s is still not ready (%v), will retry after %v", revisionName, err, retryDuration) + 
time.Sleep(retryDuration) + } +} + +func revisionExists(ctx context.Context, client provider.Client, revisionName string, lp executor.LogPersister) (bool, error) { + _, err := client.GetRevision(ctx, revisionName) + if err == nil { + return true, nil + } + + if err == provider.ErrRevisionNotFound { + return false, nil + } + + lp.Errorf("Failed while checking the existence of revision %s (%v)", revisionName, err) + return false, err +} + +func addBuiltinLabels(sm provider.ServiceManifest, hash, pipedID, appID, revisionName string, lp executor.LogPersister) bool { + labels := map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: pipedID, + provider.LabelApplication: appID, + provider.LabelCommitHash: hash, + } + // Set builtinLabels for Service. + sm.AddLabels(labels) + + if revisionName == "" { + return true + } + // Set buildinLabels for Revision. + labels[provider.LabelRevisionName] = revisionName + if err := sm.AddRevisionLabels(labels); err != nil { + lp.Errorf("Unable to add revision labels for the service manifest %s (%v)", sm.Name, err) + return false + } + return true +} diff --git a/pkg/app/pipedv1/executor/cloudrun/cloudrun_test.go b/pkg/app/pipedv1/executor/cloudrun/cloudrun_test.go new file mode 100644 index 0000000000..de99bf6873 --- /dev/null +++ b/pkg/app/pipedv1/executor/cloudrun/cloudrun_test.go @@ -0,0 +1,93 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "testing" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const serviceManifest = ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +` + +func TestAddBuiltinLabels(t *testing.T) { + t.Parallel() + + var ( + hash = "commit-hash" + pipedID = "piped-id" + appID = "app-id" + revisionName = "revision-name" + ) + sm, err := provider.ParseServiceManifest([]byte(serviceManifest)) + require.NoError(t, err) + + ok := addBuiltinLabels(sm, hash, pipedID, appID, revisionName, nil) + require.True(t, ok) + + want := map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: pipedID, + provider.LabelApplication: appID, + provider.LabelCommitHash: hash, + } + got := sm.Labels() + assert.Equal(t, want, got) + + want = map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: pipedID, + provider.LabelApplication: appID, + provider.LabelCommitHash: hash, + provider.LabelRevisionName: revisionName, + } + got = sm.RevisionLabels() + assert.Equal(t, want, got) +} diff --git a/pkg/app/pipedv1/executor/cloudrun/deploy.go b/pkg/app/pipedv1/executor/cloudrun/deploy.go new file mode 100644 index 0000000000..a5acd0e12d --- /dev/null +++ b/pkg/app/pipedv1/executor/cloudrun/deploy.go @@ -0,0 +1,235 @@ +// Copyright 2024 The 
PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "strconv" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" + + "go.uber.org/zap" +) + +const ( + promotePercentageMetadataKey = "promote-percentage" + revisionCheckDuration = 10 * time.Second + revisionCheckTimeout = 2 * time.Minute +) + +type deployExecutor struct { + executor.Input + + deploySource *deploysource.DeploySource + appCfg *config.CloudRunApplicationSpec + client provider.Client +} + +func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus { + ctx := sig.Context() + ds, err := e.TargetDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.deploySource = ds + e.appCfg = ds.ApplicationConfig.CloudRunApplicationSpec + if e.appCfg == nil { + e.LogPersister.Error("Malformed application configuration: missing CloudRunApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + cpName, cpCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + e.client, err = provider.DefaultRegistry().Client(ctx, cpName, cpCfg, 
e.Logger) + if err != nil { + e.LogPersister.Errorf("Unable to create ClourRun client for the provider (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + var ( + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageCloudRunSync: + status = e.ensureSync(ctx) + + case model.StageCloudRunPromote: + status = e.ensurePromote(ctx) + + default: + e.LogPersister.Errorf("Unsupported stage %s for cloudrun application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *deployExecutor) ensureSync(ctx context.Context) model.StageStatus { + sm, ok := loadServiceManifest(&e.Input, e.appCfg.Input.ServiceManifestFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + revision, ok := decideRevisionName(sm, e.Deployment.Trigger.Commit.Hash, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + traffics := []provider.RevisionTraffic{ + { + RevisionName: revision, + Percent: 100, + }, + } + if !configureServiceManifest(sm, revision, traffics, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + // Add builtin labels for tracking application live state + commit := e.Deployment.CommitHash() + if !addBuiltinLabels(sm, commit, e.PipedConfig.PipedID, e.Deployment.ApplicationId, revision, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + if !apply(ctx, e.client, sm, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + if err := waitRevisionReady( + ctx, + e.client, + revision, + revisionCheckDuration, + revisionCheckTimeout, + e.LogPersister, + ); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensurePromote(ctx context.Context) model.StageStatus { + options := e.StageConfig.CloudRunPromoteStageOptions + if options == nil { + 
e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + metadata := map[string]string{ + promotePercentageMetadataKey: strconv.FormatInt(int64(options.Percent.Int()), 10), + } + if err := e.MetadataStore.Stage(e.Stage.Id).PutMulti(ctx, metadata); err != nil { + e.Logger.Error("failed to save routing percentages to metadata", zap.Error(err)) + } + + // Loaded the last deployed data. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit") + return model.StageStatus_STAGE_FAILURE + } + + runningDS, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + runningAppCfg := runningDS.ApplicationConfig.CloudRunApplicationSpec + if runningAppCfg == nil { + e.LogPersister.Error("Malformed application configuration in running commit: missing CloudRunApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + lastDeployedSM, ok := loadServiceManifest(&e.Input, runningAppCfg.Input.ServiceManifestFile, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + lastDeployedRevision, ok := decideRevisionName(lastDeployedSM, e.Deployment.RunningCommitHash, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + // Load the service manifest at the target commit. 
+ sm, ok := loadServiceManifest(&e.Input, e.appCfg.Input.ServiceManifestFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + revision, ok := decideRevisionName(sm, e.Deployment.Trigger.Commit.Hash, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + traffics := []provider.RevisionTraffic{ + { + RevisionName: revision, + Percent: options.Percent.Int(), + }, + { + RevisionName: lastDeployedRevision, + Percent: 100 - options.Percent.Int(), + }, + } + + exist, err := revisionExists(ctx, e.client, revision, e.LogPersister) + if err != nil { + return model.StageStatus_STAGE_FAILURE + } + + newRevision := revision + if exist { + newRevision = "" + e.LogPersister.Infof("Revision %s was already registered", revision) + } + + if !configureServiceManifest(sm, newRevision, traffics, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + commit := e.Deployment.CommitHash() + if !addBuiltinLabels(sm, commit, e.PipedConfig.PipedID, e.Deployment.ApplicationId, newRevision, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + if !apply(ctx, e.client, sm, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + if err := waitRevisionReady( + ctx, + e.client, + revision, + revisionCheckDuration, + revisionCheckTimeout, + e.LogPersister, + ); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/cloudrun/rollback.go b/pkg/app/pipedv1/executor/cloudrun/rollback.go new file mode 100644 index 0000000000..3b7edb7b57 --- /dev/null +++ b/pkg/app/pipedv1/executor/cloudrun/rollback.go @@ -0,0 +1,110 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input + client provider.Client +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + cpName, cpCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + var err error + e.client, err = provider.DefaultRegistry().Client(ctx, cpName, cpCfg, e.Logger) + if err != nil { + e.LogPersister.Errorf("Unable to create ClourRun client for the provider (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + switch model.Stage(e.Stage.Name) { + case model.StageRollback: + status = e.ensureRollback(ctx) + + default: + e.LogPersister.Errorf("Unsupported stage %s for cloudrun application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // There is nothing to do if this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + runningDS, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + appCfg := runningDS.ApplicationConfig.CloudRunApplicationSpec + if appCfg == nil { + e.LogPersister.Error("Malformed application configuration: missing CloudRunApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + sm, ok := loadServiceManifest(&e.Input, appCfg.Input.ServiceManifestFile, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + revision, ok := decideRevisionName(sm, e.Deployment.RunningCommitHash, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + traffics := []provider.RevisionTraffic{ + { + RevisionName: revision, + Percent: 100, + }, + } + if !configureServiceManifest(sm, revision, traffics, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + // Add builtin labels for tracking application live state) + if !addBuiltinLabels(sm, e.Deployment.RunningCommitHash, e.PipedConfig.PipedID, e.Deployment.ApplicationId, revision, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + if !apply(ctx, e.client, sm, e.LogPersister) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/customsync/customsync.go b/pkg/app/pipedv1/executor/customsync/customsync.go new file mode 100644 index 0000000000..d80085ea98 --- /dev/null +++ b/pkg/app/pipedv1/executor/customsync/customsync.go @@ -0,0 +1,121 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package customsync + +import ( + "os" + "os/exec" + "strings" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type deployExecutor struct { + executor.Input + + repoDir string + appDir string +} + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +// Register registers this executor factory into a given registerer. +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &deployExecutor{ + Input: in, + } + } + r.Register(model.StageCustomSync, f) + r.RegisterRollback(model.RollbackKind_Rollback_CUSTOM_SYNC, func(in executor.Input) executor.Executor { + return &rollbackExecutor{ + Input: in, + } + }) +} + +// Execute exec the user-defined scripts in timeout duration. 
+func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var originalStatus = e.Stage.Status + ctx := sig.Context() + ds, err := e.TargetDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.repoDir = ds.RepoDir + e.appDir = ds.AppDir + + timeout := e.StageConfig.CustomSyncOptions.Timeout.Duration() + + c := make(chan model.StageStatus, 1) + go func() { + c <- e.executeCommand() + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case result := <-c: + return result + case <-timer.C: + e.LogPersister.Errorf("Canceled because of timeout") + return model.StageStatus_STAGE_FAILURE + + case s := <-sig.Ch(): + switch s { + case executor.StopSignalCancel: + return model.StageStatus_STAGE_CANCELLED + case executor.StopSignalTerminate: + return originalStatus + default: + return model.StageStatus_STAGE_FAILURE + } + } + } +} + +func (e *deployExecutor) executeCommand() model.StageStatus { + opts := e.StageConfig.CustomSyncOptions + + e.LogPersister.Infof("Runnnig commands...") + for _, v := range strings.Split(opts.Run, "\n") { + if v != "" { + e.LogPersister.Infof(" %s", v) + } + } + + envs := make([]string, 0, len(opts.Envs)) + for key, value := range opts.Envs { + envs = append(envs, key+"="+value) + } + + cmd := exec.Command("/bin/sh", "-l", "-c", opts.Run) + cmd.Dir = e.appDir + cmd.Env = append(os.Environ(), envs...) + cmd.Stdout = e.LogPersister + cmd.Stderr = e.LogPersister + if err := cmd.Run(); err != nil { + return model.StageStatus_STAGE_FAILURE + } + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/customsync/rollback.go b/pkg/app/pipedv1/executor/customsync/rollback.go new file mode 100644 index 0000000000..6aaaaa0abc --- /dev/null +++ b/pkg/app/pipedv1/executor/customsync/rollback.go @@ -0,0 +1,105 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package customsync + +import ( + "context" + "os" + "os/exec" + "strings" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input + + repoDir string + appDir string +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageCustomSyncRollback: + status = e.ensureRollback(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // Not rollback in case this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + runningDS, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.appDir = runningDS.AppDir + + if len(runningDS.GenericApplicationConfig.Pipeline.Stages) > 1 { + e.LogPersister.Errorf("There are more than one custom sync stages in the running commit.") + return model.StageStatus_STAGE_FAILURE + } + + if runningDS.GenericApplicationConfig.Pipeline.Stages[0].Name.String() != string(model.StageCustomSync) { + e.LogPersister.Errorf("There are no custom sync in the running commit") + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Infof("Start rollback for custom sync") + + return e.executeCommand(runningDS.GenericApplicationConfig.Pipeline.Stages[0]) +} + +func (e *rollbackExecutor) executeCommand(config config.PipelineStage) model.StageStatus { + opts := config.CustomSyncOptions + + e.LogPersister.Infof("Runnnig commands...") + for _, v := range strings.Split(opts.Run, "\n") { + if v != "" { + e.LogPersister.Infof(" %s", v) + } + } + + envs := make([]string, 0, len(opts.Envs)) + for key, value := range opts.Envs { + envs = append(envs, key+"="+value) + } + + cmd := exec.Command("/bin/sh", "-l", "-c", opts.Run) + cmd.Dir = e.appDir + cmd.Env = append(os.Environ(), envs...) + cmd.Stdout = e.LogPersister + cmd.Stderr = e.LogPersister + if err := cmd.Run(); err != nil { + return model.StageStatus_STAGE_FAILURE + } + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/ecs/deploy.go b/pkg/app/pipedv1/executor/ecs/deploy.go new file mode 100644 index 0000000000..4f93de0435 --- /dev/null +++ b/pkg/app/pipedv1/executor/ecs/deploy.go @@ -0,0 +1,221 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type deployExecutor struct { + executor.Input + + deploySource *deploysource.DeploySource + appCfg *config.ECSApplicationSpec + platformProviderName string + platformProviderCfg *config.PlatformProviderECSConfig +} + +func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus { + ctx := sig.Context() + ds, err := e.TargetDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.deploySource = ds + e.appCfg = ds.ApplicationConfig.ECSApplicationSpec + if e.appCfg == nil { + e.LogPersister.Errorf("Malformed application configuration: missing ECSApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + var found bool + e.platformProviderName, e.platformProviderCfg, found = findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + var ( + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageECSSync: + status = e.ensureSync(ctx) + case model.StageECSCanaryRollout: + status = 
e.ensureCanaryRollout(ctx) + case model.StageECSPrimaryRollout: + status = e.ensurePrimaryRollout(ctx) + case model.StageECSCanaryClean: + status = e.ensureCanaryClean(ctx) + case model.StageECSTrafficRouting: + status = e.ensureTrafficRouting(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s for ECS application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *deployExecutor) ensureSync(ctx context.Context) model.StageStatus { + ecsInput := e.appCfg.Input + + taskDefinition, ok := loadTaskDefinition(&e.Input, ecsInput.TaskDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + if ecsInput.IsStandaloneTask() { + if !runStandaloneTask(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, taskDefinition, &ecsInput) { + return model.StageStatus_STAGE_FAILURE + } + return model.StageStatus_STAGE_SUCCESS + } + + servicedefinition, ok := loadServiceDefinition(&e.Input, ecsInput.ServiceDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + var primary *types.LoadBalancer + // When the service is not accessed via ELB, the target group is not used. 
+ if ecsInput.IsAccessedViaELB() { + primary, _, ok = loadTargetGroups(&e.Input, e.appCfg, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + } + + recreate := e.appCfg.QuickSync.Recreate + if !sync(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, recreate, taskDefinition, servicedefinition, primary) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensurePrimaryRollout(ctx context.Context) model.StageStatus { + taskDefinition, ok := loadTaskDefinition(&e.Input, e.appCfg.Input.TaskDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + servicedefinition, ok := loadServiceDefinition(&e.Input, e.appCfg.Input.ServiceDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + switch e.appCfg.Input.AccessType { + case config.AccessTypeELB: + primary, _, ok := loadTargetGroups(&e.Input, e.appCfg, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + if primary == nil { + e.LogPersister.Error("Primary target group is required to enable rolling out PRIMARY variant") + return model.StageStatus_STAGE_FAILURE + } + + if !rollout(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, taskDefinition, servicedefinition, primary) { + return model.StageStatus_STAGE_FAILURE + } + case config.AccessTypeServiceDiscovery: + // Target groups are not used. 
+ if !rollout(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, taskDefinition, servicedefinition, nil) { + return model.StageStatus_STAGE_FAILURE + } + default: + e.LogPersister.Errorf("Unsupported access type %s in stage %s for ECS application", e.appCfg.Input.AccessType, e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensureCanaryRollout(ctx context.Context) model.StageStatus { + taskDefinition, ok := loadTaskDefinition(&e.Input, e.appCfg.Input.TaskDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + servicedefinition, ok := loadServiceDefinition(&e.Input, e.appCfg.Input.ServiceDefinitionFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + switch e.appCfg.Input.AccessType { + case config.AccessTypeELB: + _, canary, ok := loadTargetGroups(&e.Input, e.appCfg, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + if canary == nil { + e.LogPersister.Error("Canary target group is required to enable rolling out CANARY variant") + return model.StageStatus_STAGE_FAILURE + } + + if !rollout(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, taskDefinition, servicedefinition, canary) { + return model.StageStatus_STAGE_FAILURE + } + case config.AccessTypeServiceDiscovery: + // Target groups are not used. + if !rollout(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, taskDefinition, servicedefinition, nil) { + return model.StageStatus_STAGE_FAILURE + } + default: + e.LogPersister.Errorf("Unsupported access type %s in stage %s for ECS application", e.appCfg.Input.AccessType, e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensureTrafficRouting(ctx context.Context) model.StageStatus { + // Traffic Routing is not supported for other kinds than ELB. 
+ if !e.appCfg.Input.IsAccessedViaELB() { + e.LogPersister.Errorf("Unsupported access type %s in stage %s for ECS application", e.appCfg.Input.AccessType, e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + primary, canary, ok := loadTargetGroups(&e.Input, e.appCfg, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + if primary == nil || canary == nil { + e.LogPersister.Error("Primary/Canary target group are required to enable traffic routing") + return model.StageStatus_STAGE_FAILURE + } + + if !routing(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, *primary, *canary) { + return model.StageStatus_STAGE_FAILURE + } + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensureCanaryClean(ctx context.Context) model.StageStatus { + if !clean(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/ecs/ecs.go b/pkg/app/pipedv1/executor/ecs/ecs.go new file mode 100644 index 0000000000..afb80b64cc --- /dev/null +++ b/pkg/app/pipedv1/executor/ecs/ecs.go @@ -0,0 +1,477 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/ecs" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + // Canary task set metadata keys. + canaryTaskSetKeyName = "canary-taskset-object" + // Stage metadata keys. + trafficRoutePrimaryMetadataKey = "primary-percentage" + trafficRouteCanaryMetadataKey = "canary-percentage" + canaryScaleMetadataKey = "canary-scale" + currentListenersKey = "current-listeners" +) + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &deployExecutor{ + Input: in, + } + } + r.Register(model.StageECSSync, f) + r.Register(model.StageECSCanaryRollout, f) + r.Register(model.StageECSPrimaryRollout, f) + r.Register(model.StageECSCanaryClean, f) + r.Register(model.StageECSTrafficRouting, f) + + r.RegisterRollback(model.RollbackKind_Rollback_ECS, func(in executor.Input) executor.Executor { + return &rollbackExecutor{ + Input: in, + } + }) +} + +func findPlatformProvider(in *executor.Input) (name string, cfg *config.PlatformProviderECSConfig, found bool) { + name = in.Application.PlatformProvider + if name == "" { + in.LogPersister.Errorf("Missing the PlatformProvider name in the application configuration") + return + } + + cp, ok := in.PipedConfig.FindPlatformProvider(name, model.ApplicationKind_ECS) + if !ok { + in.LogPersister.Errorf("The specified platform provider %q was not found in piped configuration", name) + return + } + + cfg = cp.ECSConfig + found = true + return +} + +func 
loadServiceDefinition(in *executor.Input, serviceDefinitionFile string, ds *deploysource.DeploySource) (types.Service, bool) { + in.LogPersister.Infof("Loading service manifest at commit %s", ds.Revision) + + serviceDefinition, err := provider.LoadServiceDefinition(ds.AppDir, serviceDefinitionFile) + if err != nil { + in.LogPersister.Errorf("Failed to load ECS service definition (%v)", err) + return types.Service{}, false + } + + serviceDefinition.Tags = append( + serviceDefinition.Tags, + provider.MakeTags(map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: in.PipedConfig.PipedID, + provider.LabelApplication: in.Deployment.ApplicationId, + provider.LabelCommitHash: in.Deployment.CommitHash(), + })..., + ) + + in.LogPersister.Infof("Successfully loaded the ECS service definition at commit %s", ds.Revision) + return serviceDefinition, true +} + +func loadTaskDefinition(in *executor.Input, taskDefinitionFile string, ds *deploysource.DeploySource) (types.TaskDefinition, bool) { + in.LogPersister.Infof("Loading task definition manifest at commit %s", ds.Revision) + + taskDefinition, err := provider.LoadTaskDefinition(ds.AppDir, taskDefinitionFile) + if err != nil { + in.LogPersister.Errorf("Failed to load ECS task definition (%v)", err) + return types.TaskDefinition{}, false + } + + in.LogPersister.Infof("Successfully loaded the ECS task definition at commit %s", ds.Revision) + return taskDefinition, true +} + +func loadTargetGroups(in *executor.Input, appCfg *config.ECSApplicationSpec, ds *deploysource.DeploySource) (*types.LoadBalancer, *types.LoadBalancer, bool) { + in.LogPersister.Infof("Loading target groups config at the commit %s", ds.Revision) + + primary, canary, err := provider.LoadTargetGroups(appCfg.Input.TargetGroups) + if err != nil && !errors.Is(err, provider.ErrNoTargetGroup) { + in.LogPersister.Errorf("Failed to load TargetGroups (%v)", err) + return nil, nil, false + } + + if errors.Is(err, 
provider.ErrNoTargetGroup) { + in.LogPersister.Infof("No target groups were set at commit %s", ds.Revision) + return nil, nil, true + } + + in.LogPersister.Infof("Successfully loaded the ECS target groups at commit %s", ds.Revision) + return primary, canary, true +} + +func applyTaskDefinition(ctx context.Context, cli provider.Client, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error) { + td, err := cli.RegisterTaskDefinition(ctx, taskDefinition) + if err != nil { + return nil, fmt.Errorf("unable to register ECS task definition of family %s: %w", *taskDefinition.Family, err) + } + return td, nil +} + +func applyServiceDefinition(ctx context.Context, cli provider.Client, serviceDefinition types.Service) (*types.Service, error) { + found, err := cli.ServiceExists(ctx, *serviceDefinition.ClusterArn, *serviceDefinition.ServiceName) + if err != nil { + return nil, fmt.Errorf("unable to validate service name %s: %w", *serviceDefinition.ServiceName, err) + } + + var service *types.Service + if found { + service, err = cli.UpdateService(ctx, serviceDefinition) + if err != nil { + return nil, fmt.Errorf("failed to update ECS service %s: %w", *serviceDefinition.ServiceName, err) + } + if err := cli.TagResource(ctx, *service.ServiceArn, serviceDefinition.Tags); err != nil { + return nil, fmt.Errorf("failed to update tags of ECS service %s: %w", *serviceDefinition.ServiceName, err) + } + // Re-assign tags to service object because UpdateService API doesn't return tags. 
+ service.Tags = serviceDefinition.Tags + + } else { + service, err = cli.CreateService(ctx, serviceDefinition) + if err != nil { + return nil, fmt.Errorf("failed to create ECS service %s: %w", *serviceDefinition.ServiceName, err) + } + } + + return service, nil +} + +func runStandaloneTask( + ctx context.Context, + in *executor.Input, + cloudProviderName string, + cloudProviderCfg *config.PlatformProviderECSConfig, + taskDefinition types.TaskDefinition, + ecsInput *config.ECSDeploymentInput, +) bool { + client, err := provider.DefaultRegistry().Client(cloudProviderName, cloudProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", cloudProviderName, err) + return false + } + + in.LogPersister.Infof("Start applying the ECS task definition") + tags := provider.MakeTags(map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: in.PipedConfig.PipedID, + provider.LabelApplication: in.Deployment.ApplicationId, + provider.LabelCommitHash: in.Deployment.CommitHash(), + }) + td, err := applyTaskDefinition(ctx, client, taskDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to apply ECS task definition: %v", err) + return false + } + + if !*ecsInput.RunStandaloneTask { + in.LogPersister.Infof("Skipped running task") + return true + } + + err = client.RunTask( + ctx, + *td, + ecsInput.ClusterArn, + ecsInput.LaunchType, + &ecsInput.AwsVpcConfiguration, + tags, + ) + if err != nil { + in.LogPersister.Errorf("Failed to run ECS task: %v", err) + return false + } + return true +} + +func createPrimaryTaskSet(ctx context.Context, client provider.Client, service types.Service, taskDef types.TaskDefinition, targetGroup *types.LoadBalancer) error { + // Get current PRIMARY/ACTIVE task sets. + prevTaskSets, err := client.GetServiceTaskSets(ctx, service) + if err != nil { + return err + } + + // Create a task set in the specified cluster and service. 
+ // In case of creating Primary taskset, the number of desired tasks scale is always set to 100 + // which means we create as many tasks as the current primary taskset has. + taskSet, err := client.CreateTaskSet(ctx, service, taskDef, targetGroup, 100) + if err != nil { + return err + } + + // Make new taskSet as PRIMARY task set, so that it will handle production service. + if _, err = client.UpdateServicePrimaryTaskSet(ctx, service, *taskSet); err != nil { + return err + } + + // Remove old taskSets if existed. + // HACK: All old task sets including canary are deleted here. + // However, we need to discuss whether we should delete the canary here or in later stage(CanaryClean). + for _, prevTaskSet := range prevTaskSets { + if err = client.DeleteTaskSet(ctx, *prevTaskSet); err != nil { + return err + } + } + + return nil +} + +func sync(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, recreate bool, taskDefinition types.TaskDefinition, serviceDefinition types.Service, targetGroup *types.LoadBalancer) bool { + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", platformProviderName, err) + return false + } + + in.LogPersister.Infof("Start applying the ECS task definition") + td, err := applyTaskDefinition(ctx, client, taskDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to apply ECS task definition: %v", err) + return false + } + + in.LogPersister.Infof("Start applying the ECS service definition") + service, err := applyServiceDefinition(ctx, client, serviceDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to apply service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + + if recreate { + cnt := service.DesiredCount + // Scale down the service tasks by set it to 0 + in.LogPersister.Infof("Scale 
down ECS desired tasks count to 0") + service.DesiredCount = 0 + if _, err = client.UpdateService(ctx, *service); err != nil { + in.LogPersister.Errorf("Failed to stop service tasks: %v", err) + return false + } + + in.LogPersister.Infof("Start rolling out ECS task set") + if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil { + in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + + // Scale up the service tasks count back to its desired. + in.LogPersister.Infof("Scale up ECS desired tasks count back to %d", cnt) + service.DesiredCount = cnt + if _, err = client.UpdateService(ctx, *service); err != nil { + in.LogPersister.Errorf("Failed to turning back service tasks: %v", err) + return false + } + } else { + in.LogPersister.Infof("Start rolling out ECS task set") + if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil { + in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + } + + in.LogPersister.Infof("Wait service to reach stable state") + if err := client.WaitServiceStable(ctx, *service); err != nil { + in.LogPersister.Errorf("Failed to wait service %s to reach stable state: %v", *serviceDefinition.ServiceName, err) + return false + } + + in.LogPersister.Infof("Successfully applied the service definition and the task definition for ECS service %s and task definition of family %s", *serviceDefinition.ServiceName, *taskDefinition.Family) + return true +} + +func rollout(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, taskDefinition types.TaskDefinition, serviceDefinition types.Service, targetGroup *types.LoadBalancer) bool { + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + 
in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", platformProviderName, err) + return false + } + + in.LogPersister.Infof("Start applying the ECS task definition") + td, err := applyTaskDefinition(ctx, client, taskDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to apply ECS task definition: %v", err) + return false + } + + in.LogPersister.Infof("Start applying the ECS service definition") + service, err := applyServiceDefinition(ctx, client, serviceDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to apply service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + + // Create a task set in the specified cluster and service. + in.LogPersister.Infof("Start rolling out ECS task set") + if in.StageConfig.Name == model.StageECSPrimaryRollout { + // Create PRIMARY task set in case of Primary rollout. + if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil { + in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + } else { + // Load Canary rollout stage options to get scale configuration. + options := in.StageConfig.ECSCanaryRolloutStageOptions + if options == nil { + in.LogPersister.Errorf("Malformed configuration for stage %s", in.Stage.Name) + return false + } + + metadata := map[string]string{ + canaryScaleMetadataKey: strconv.FormatInt(int64(options.Scale.Int()), 10), + } + if err := in.MetadataStore.Stage(in.Stage.Id).PutMulti(ctx, metadata); err != nil { + in.Logger.Error("Failed to store canary scale infor to metadata store", zap.Error(err)) + } + + // Create ACTIVE task set in case of Canary rollout. 
+ taskSet, err := client.CreateTaskSet(ctx, *service, *td, targetGroup, options.Scale.Int()) + if err != nil { + in.LogPersister.Errorf("Failed to create ECS task set for service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + // Store created ACTIVE TaskSet (CANARY variant) to delete later. + taskSetObjData, err := json.Marshal(taskSet) + if err != nil { + in.LogPersister.Errorf("Unable to store created active taskSet to metadata store: %v", err) + return false + } + if err := in.MetadataStore.Shared().Put(ctx, canaryTaskSetKeyName, string(taskSetObjData)); err != nil { + in.LogPersister.Errorf("Unable to store created active taskSet to metadata store: %v", err) + return false + } + } + + in.LogPersister.Infof("Wait service to reach stable state") + if err := client.WaitServiceStable(ctx, *service); err != nil { + in.LogPersister.Errorf("Failed to wait service %s to reach stable state: %v", *serviceDefinition.ServiceName, err) + return false + } + + in.LogPersister.Infof("Successfully applied the service definition and the task definition for ECS service %s and task definition of family %s", *serviceDefinition.ServiceName, *taskDefinition.Family) + return true +} + +func clean(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig) bool { + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", platformProviderName, err) + return false + } + + // Get task set object from metadata store. 
+ taskSetObjData, ok := in.MetadataStore.Shared().Get(canaryTaskSetKeyName) + if !ok { + in.LogPersister.Error("Unable to restore taskset to clean: Not found") + return false + } + taskSet := &types.TaskSet{} + if err := json.Unmarshal([]byte(taskSetObjData), taskSet); err != nil { + in.LogPersister.Errorf("Unable to restore taskset to clean: %v", err) + return false + } + + // Delete canary task set if present. + in.LogPersister.Infof("Cleaning CANARY task set %s from service %s", *taskSet.TaskSetArn, *taskSet.ServiceArn) + if err := client.DeleteTaskSet(ctx, *taskSet); err != nil { + in.LogPersister.Errorf("Failed to clean CANARY task set %s: %v", *taskSet.TaskSetArn, err) + return false + } + + in.LogPersister.Infof("Successfully cleaned CANARY task set %s from service %s", *taskSet.TaskSetArn, *taskSet.ServiceArn) + return true +} + +func routing(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, primaryTargetGroup types.LoadBalancer, canaryTargetGroup types.LoadBalancer) bool { + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", platformProviderName, err) + return false + } + + options := in.StageConfig.ECSTrafficRoutingStageOptions + if options == nil { + in.LogPersister.Errorf("Malformed configuration for stage %s", in.Stage.Name) + return false + } + primary, canary := options.Percentage() + routingTrafficCfg := provider.RoutingTrafficConfig{ + { + TargetGroupArn: *primaryTargetGroup.TargetGroupArn, + Weight: primary, + }, + { + TargetGroupArn: *canaryTargetGroup.TargetGroupArn, + Weight: canary, + }, + } + + metadataPercentage := map[string]string{ + trafficRoutePrimaryMetadataKey: strconv.FormatInt(int64(primary), 10), + trafficRouteCanaryMetadataKey: strconv.FormatInt(int64(canary), 10), + } + if err := 
in.MetadataStore.Stage(in.Stage.Id).PutMulti(ctx, metadataPercentage); err != nil { + in.Logger.Error("Failed to store traffic routing config to metadata store", zap.Error(err)) + } + + var currListenerArns []string + value, ok := in.MetadataStore.Shared().Get(currentListenersKey) + if ok { + currListenerArns = strings.Split(value, ",") + } else { + currListenerArns, err = client.GetListenerArns(ctx, primaryTargetGroup) + if err != nil { + in.LogPersister.Errorf("Failed to get current active listeners: %v", err) + return false + } + } + + // Store created listeners to use later. + metadata := strings.Join(currListenerArns, ",") + if err := in.MetadataStore.Shared().Put(ctx, currentListenersKey, metadata); err != nil { + in.LogPersister.Errorf("Unable to store created listeners to metadata store: %v", err) + return false + } + + if err := client.ModifyListeners(ctx, currListenerArns, routingTrafficCfg); err != nil { + in.LogPersister.Errorf("Failed to routing traffic to PRIMARY/CANARY variants: %v", err) + return false + } + + return true +} diff --git a/pkg/app/pipedv1/executor/ecs/rollback.go b/pkg/app/pipedv1/executor/ecs/rollback.go new file mode 100644 index 0000000000..32e7c6f106 --- /dev/null +++ b/pkg/app/pipedv1/executor/ecs/rollback.go @@ -0,0 +1,179 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "context" + "errors" + + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/ecs" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageRollback: + status = e.ensureRollback(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s for ECS application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // Not rollback in case this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + runningDS, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + appCfg := runningDS.ApplicationConfig.ECSApplicationSpec + if appCfg == nil { + e.LogPersister.Errorf("Malformed application configuration: missing ECSApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + platformProviderName, platformProviderCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + taskDefinition, ok := loadTaskDefinition(&e.Input, appCfg.Input.TaskDefinitionFile, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + serviceDefinition, ok := loadServiceDefinition(&e.Input, appCfg.Input.ServiceDefinitionFile, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + primary, canary, ok := loadTargetGroups(&e.Input, appCfg, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + if !rollback(ctx, &e.Input, platformProviderName, platformProviderCfg, taskDefinition, serviceDefinition, primary, canary) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func rollback(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, taskDefinition types.TaskDefinition, serviceDefinition types.Service, primaryTargetGroup *types.LoadBalancer, canaryTargetGroup *types.LoadBalancer) bool { + in.LogPersister.Infof("Start rollback the ECS service and task family: %s and %s to original stage", *serviceDefinition.ServiceName, *taskDefinition.Family) + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", 
platformProviderName, err) + return false + } + + // Re-register TaskDef to get TaskDefArn. + // Consider using DescribeServices and get services[0].taskSets[0].taskDefinition (taskDefinition of PRIMARY taskSet) + // then store it in metadata store and use for rollback instead. + td, err := client.RegisterTaskDefinition(ctx, taskDefinition) + if err != nil { + in.LogPersister.Errorf("Failed to register new revision of ECS task definition %s: %v", *taskDefinition.Family, err) + return false + } + + // Rollback ECS service configuration to previous state including commit-hash of the tag. + service, err := applyServiceDefinition(ctx, client, serviceDefinition) + if err != nil { + in.LogPersister.Errorf("Unable to rollback ECS service %s configuration to previous stage: %v", *serviceDefinition.ServiceName, err) + return false + } + + // Get current PRIMARY/ACTIVE task set. + prevTaskSets, err := client.GetServiceTaskSets(ctx, *service) + // Ignore error in case it's not found error, the prevTaskSets doesn't exist for newly created Service. + if err != nil && !errors.Is(err, platformprovider.ErrNotFound) { + in.LogPersister.Errorf("Failed to determine current ECS PRIMARY/ACTIVE taskSet of service %s for rollback: %v", *serviceDefinition.ServiceName, err) + return false + } + + // On rolling back, the scale of desired tasks will be set to 100 (same as the original state). + taskSet, err := client.CreateTaskSet(ctx, *service, *td, primaryTargetGroup, 100) + if err != nil { + in.LogPersister.Errorf("Failed to create ECS task set %s: %v", *serviceDefinition.ServiceName, err) + return false + } + + // Make new taskSet as PRIMARY task set, so that it will handle production service. + if _, err = client.UpdateServicePrimaryTaskSet(ctx, *service, *taskSet); err != nil { + in.LogPersister.Errorf("Failed to update PRIMARY ECS taskSet for service %s: %v", *serviceDefinition.ServiceName, err) + return false + } + + // Reset routing in case of rolling back progressive pipeline. 
+ if primaryTargetGroup != nil && canaryTargetGroup != nil { + routingTrafficCfg := provider.RoutingTrafficConfig{ + { + TargetGroupArn: *primaryTargetGroup.TargetGroupArn, + Weight: 100, + }, + { + TargetGroupArn: *canaryTargetGroup.TargetGroupArn, + Weight: 0, + }, + } + + currListenerArns, err := client.GetListenerArns(ctx, *primaryTargetGroup) + if err != nil { + in.LogPersister.Errorf("Failed to get current active listeners: %v", err) + return false + } + + if err := client.ModifyListeners(ctx, currListenerArns, routingTrafficCfg); err != nil { + in.LogPersister.Errorf("Failed to routing traffic to PRIMARY/CANARY variants: %v", err) + return false + } + } + + // Delete previous ACTIVE taskSets + in.LogPersister.Infof("Start deleting previous ACTIVE taskSets") + for _, ts := range prevTaskSets { + in.LogPersister.Infof("Deleting previous ACTIVE taskSet %s", *ts.TaskSetArn) + if err := client.DeleteTaskSet(ctx, *ts); err != nil { + in.LogPersister.Errorf("Failed to remove previous ACTIVE taskSet %s: %v", *ts.TaskSetArn, err) + return false + } + } + + in.LogPersister.Infof("Rolled back the ECS service %s and task definition %s configuration to original stage", *serviceDefinition.ServiceName, *taskDefinition.Family) + return true +} diff --git a/pkg/app/pipedv1/executor/executor.go b/pkg/app/pipedv1/executor/executor.go index 7e73c7df1e..f31bd31f74 100644 --- a/pkg/app/pipedv1/executor/executor.go +++ b/pkg/app/pipedv1/executor/executor.go @@ -21,6 +21,7 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" "github.com/pipe-cd/pipecd/pkg/cache" "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/git" @@ -49,6 +50,10 @@ type CommandLister interface { ListCommands() []model.ReportableCommand } +type AppLiveResourceLister interface { + ListKubernetesResources() ([]provider.Manifest, bool) +} + 
type AnalysisResultStore interface { GetLatestAnalysisResult(ctx context.Context) (*model.AnalysisResult, error) PutLatestAnalysisResult(ctx context.Context, analysisResult *model.AnalysisResult) error @@ -78,6 +83,7 @@ type Input struct { LogPersister LogPersister MetadataStore metadatastore.MetadataStore AppManifestsCache cache.Cache + AppLiveResourceLister AppLiveResourceLister AnalysisResultStore AnalysisResultStore Logger *zap.Logger Notifier Notifier diff --git a/pkg/app/pipedv1/executor/kubernetes/applier_group.go b/pkg/app/pipedv1/executor/kubernetes/applier_group.go new file mode 100644 index 0000000000..eccfffe465 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/applier_group.go @@ -0,0 +1,148 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "fmt" + "sort" + "strings" + + "go.uber.org/zap" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applierGetter interface { + Get(k provider.ResourceKey) (provider.Applier, error) +} + +type applierGroup struct { + resourceRoutes []config.KubernetesResourceRoute + appliers map[string]provider.Applier + labeledProviders map[string][]string + defaultApplier provider.Applier +} + +func newApplierGroup(defaultProvider string, appCfg config.KubernetesApplicationSpec, pipedCfg *config.PipedSpec, logger *zap.Logger) (*applierGroup, error) { + cp, ok := pipedCfg.FindPlatformProvider(defaultProvider, model.ApplicationKind_KUBERNETES) + if !ok { + return nil, fmt.Errorf("provider %s was not found", defaultProvider) + } + + defaultApplier := provider.NewApplier( + appCfg.Input, + *cp.KubernetesConfig, + logger, + ) + d := &applierGroup{ + resourceRoutes: appCfg.ResourceRoutes, + appliers: map[string]provider.Applier{defaultProvider: defaultApplier}, + labeledProviders: make(map[string][]string, 0), + defaultApplier: defaultApplier, + } + + for _, r := range appCfg.ResourceRoutes { + if name := r.Provider.Name; name != "" { + if _, ok := d.appliers[name]; ok { + continue + } + cp, found := pipedCfg.FindPlatformProvider(name, model.ApplicationKind_KUBERNETES) + if !found { + return nil, fmt.Errorf("provider %s specified in resourceRoutes was not found", name) + } + d.appliers[name] = provider.NewApplier(appCfg.Input, *cp.KubernetesConfig, logger) + continue + } + if labels := r.Provider.Labels; len(labels) > 0 { + cps := pipedCfg.FindPlatformProvidersByLabels(labels, model.ApplicationKind_KUBERNETES) + if len(cps) == 0 { + return nil, fmt.Errorf("there is no provider that matches the specified labels (%v)", labels) + } + names := make([]string, 0, len(cps)) + for _, cp := range cps { + if _, ok := 
d.appliers[cp.Name]; !ok { + d.appliers[cp.Name] = provider.NewApplier(appCfg.Input, *cp.KubernetesConfig, logger) + } + names = append(names, cp.Name) + } + // Save names of the labeled providers for search later. + key := makeKeyFromProviderLabels(labels) + d.labeledProviders[key] = names + } + } + + return d, nil +} + +func makeKeyFromProviderLabels(labels map[string]string) string { + labelList := make([]string, 0, len(labels)) + for k, v := range labels { + if v != "" { + labelList = append(labelList, fmt.Sprintf("%s:%s", k, v)) + } + } + sort.Strings(labelList) + return strings.Join(labelList, ",") +} + +// TODO: Add test for this applierGroup function. +func (d applierGroup) Get(rk provider.ResourceKey) (provider.Applier, error) { + resourceMatch := func(matcher *config.KubernetesResourceRouteMatcher) bool { + // Match any resource when the matcher was not specified. + if matcher == nil { + return true + } + if matcher.Kind != "" && matcher.Kind != rk.Kind { + return false + } + if matcher.Name != "" && matcher.Name != rk.Name { + return false + } + return true + } + + for _, r := range d.resourceRoutes { + if !resourceMatch(r.Match) { + continue + } + if name := r.Provider.Name; name != "" { + if a, ok := d.appliers[name]; ok { + return a, nil + } + return nil, fmt.Errorf("provider %s specified in resourceRoutes was not found", name) + } + if labels := r.Provider.Labels; len(labels) > 0 { + key := makeKeyFromProviderLabels(labels) + cps := d.labeledProviders[key] + if len(cps) == 0 { + return nil, fmt.Errorf("there are no provider that matches the specified labels (%v)", labels) + } + as := make([]provider.Applier, 0, len(cps)) + for _, cp := range cps { + if a, ok := d.appliers[cp]; ok { + as = append(as, a) + continue + } + return nil, fmt.Errorf("provider %s specified in resourceRoutes was not found", cp) + } + applier := provider.NewMultiApplier(as...) 
+ return applier, nil + } + } + + return d.defaultApplier, nil +} diff --git a/pkg/app/pipedv1/executor/kubernetes/applier_group_test.go b/pkg/app/pipedv1/executor/kubernetes/applier_group_test.go new file mode 100644 index 0000000000..bedcaa8b62 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/applier_group_test.go @@ -0,0 +1,64 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMakeKeyFromProviderLabels(t *testing.T) { + testcases := []struct { + name string + labels map[string]string + want string + }{ + { + name: "empty", + want: "", + }, + { + name: "one label", + labels: map[string]string{ + "foo": "foo-1", + }, + want: "foo:foo-1", + }, + { + name: "multiple labels", + labels: map[string]string{ + "foo": "foo-1", + "bar": "bar-1", + }, + want: "bar:bar-1,foo:foo-1", + }, + { + name: "multiple labels in the reverse order", + labels: map[string]string{ + "bar": "bar-1", + "foo": "foo-1", + }, + want: "bar:bar-1,foo:foo-1", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got := makeKeyFromProviderLabels(tc.labels) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/executor/kubernetes/baseline.go b/pkg/app/pipedv1/executor/kubernetes/baseline.go new file mode 100644 index 0000000000..89afef8795 --- /dev/null +++ 
// ---- baseline.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"fmt"
	"strings"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

const (
	// Metadata key under which the comma-joined resource keys rolled out for
	// the BASELINE variant are stored, so ensureBaselineClean can find them.
	addedBaselineResourcesMetadataKey = "baseline-resources"
)

// ensureBaselineRollout rolls out a BASELINE variant built from the manifests
// of the last successfully deployed (running) commit. The created resource
// keys are recorded in shared deployment metadata for later cleanup.
func (e *deployExecutor) ensureBaselineRollout(ctx context.Context) model.StageStatus {
	var (
		runningCommit   = e.Deployment.RunningCommitHash
		options         = e.StageConfig.K8sBaselineRolloutStageOptions
		variantLabel    = e.appCfg.VariantLabel.Key
		baselineVariant = e.appCfg.VariantLabel.BaselineValue
	)
	if options == nil {
		e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name)
		return model.StageStatus_STAGE_FAILURE
	}

	// Load running manifests at the most successful deployed commit.
	e.LogPersister.Infof("Loading running manifests at commit %s for handling", runningCommit)
	manifests, err := e.loadRunningManifests(ctx)
	if err != nil {
		e.LogPersister.Errorf("Failed while loading running manifests (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}
	e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests))

	if len(manifests) == 0 {
		e.LogPersister.Error("This application has no running Kubernetes manifests to handle")
		return model.StageStatus_STAGE_FAILURE
	}

	baselineManifests, err := e.generateBaselineManifests(manifests, *options, variantLabel, baselineVariant)
	if err != nil {
		e.LogPersister.Errorf("Unable to generate manifests for BASELINE variant (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}

	// Add builtin annotations for tracking application live state.
	addBuiltinAnnotations(
		baselineManifests,
		variantLabel,
		baselineVariant,
		runningCommit,
		e.PipedConfig.PipedID,
		e.Deployment.ApplicationId,
	)

	// Store added resource keys into metadata for cleaning later.
	addedResources := make([]string, 0, len(baselineManifests))
	for _, m := range baselineManifests {
		addedResources = append(addedResources, m.Key.String())
	}
	metadata := strings.Join(addedResources, ",")
	err = e.MetadataStore.Shared().Put(ctx, addedBaselineResourcesMetadataKey, metadata)
	if err != nil {
		e.LogPersister.Errorf("Unable to save deployment metadata (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}

	// Start rolling out the resources for BASELINE variant.
	e.LogPersister.Info("Start rolling out BASELINE variant...")
	if err := applyManifests(ctx, e.applierGetter, baselineManifests, e.appCfg.Input.Namespace, e.LogPersister); err != nil {
		return model.StageStatus_STAGE_FAILURE
	}

	e.LogPersister.Success("Successfully rolled out BASELINE variant")
	return model.StageStatus_STAGE_SUCCESS
}

// ensureBaselineClean deletes the BASELINE resources whose keys were recorded
// in shared metadata by ensureBaselineRollout.
func (e *deployExecutor) ensureBaselineClean(ctx context.Context) model.StageStatus {
	value, ok := e.MetadataStore.Shared().Get(addedBaselineResourcesMetadataKey)
	if !ok {
		e.LogPersister.Error("Unable to determine the applied BASELINE resources")
		return model.StageStatus_STAGE_FAILURE
	}

	// The metadata value is the comma-joined list written by the rollout stage.
	resources := strings.Split(value, ",")
	if err := removeBaselineResources(ctx, e.applierGetter, resources, e.LogPersister); err != nil {
		e.LogPersister.Errorf("Unable to remove baseline resources: %v", err)
		return model.StageStatus_STAGE_FAILURE
	}
	return model.StageStatus_STAGE_SUCCESS
}

// generateBaselineManifests derives the BASELINE variant manifests (optional
// Service plus workloads) from the given running manifests. The resource name
// suffix defaults to the variant name unless opts.Suffix overrides it.
func (e *deployExecutor) generateBaselineManifests(manifests []provider.Manifest, opts config.K8sBaselineRolloutStageOptions, variantLabel, variant string) ([]provider.Manifest, error) {
	suffix := variant
	if opts.Suffix != "" {
		suffix = opts.Suffix
	}

	workloads := findWorkloadManifests(manifests, e.appCfg.Workloads)
	if len(workloads) == 0 {
		return nil, fmt.Errorf("unable to find any workload manifests for BASELINE variant")
	}

	var baselineManifests []provider.Manifest

	// Find service manifests and duplicate them for BASELINE variant.
	if opts.CreateService {
		serviceName := e.appCfg.Service.Name
		services := findManifests(provider.KindService, serviceName, manifests)
		if len(services) == 0 {
			return nil, fmt.Errorf("unable to find any service for name=%q", serviceName)
		}
		// Because the loaded manifests are read-only
		// so we duplicate them to avoid updating the shared manifests data in cache.
		services = duplicateManifests(services, "")

		generatedServices, err := generateVariantServiceManifests(services, variantLabel, variant, suffix)
		if err != nil {
			return nil, err
		}
		baselineManifests = append(baselineManifests, generatedServices...)
	}

	// Generate new workload manifests for BASELINE variant.
	// Unlike CANARY, the baseline keeps mounting the running ConfigMaps and
	// Secrets, so nil is passed for both below.
	replicasCalculator := func(cur *int32) int32 {
		if cur == nil {
			return 1
		}
		num := opts.Replicas.Calculate(int(*cur), 1)
		return int32(num)
	}
	generatedWorkloads, err := generateVariantWorkloadManifests(workloads, nil, nil, variantLabel, variant, suffix, replicasCalculator)
	if err != nil {
		return nil, err
	}
	baselineManifests = append(baselineManifests, generatedWorkloads...)

	return baselineManifests, nil
}

// removeBaselineResources deletes the given encoded resource keys, services
// first (to cut incoming traffic) and then workloads. Undecodable keys are
// logged and skipped.
func removeBaselineResources(ctx context.Context, ag applierGetter, resources []string, lp executor.LogPersister) error {
	if len(resources) == 0 {
		return nil
	}

	var (
		workloadKeys = make([]provider.ResourceKey, 0)
		serviceKeys  = make([]provider.ResourceKey, 0)
	)
	for _, r := range resources {
		key, err := provider.DecodeResourceKey(r)
		if err != nil {
			lp.Errorf("Had an error while decoding BASELINE resource key: %s, %v", r, err)
			continue
		}
		if key.IsWorkload() {
			workloadKeys = append(workloadKeys, key)
		} else {
			serviceKeys = append(serviceKeys, key)
		}
	}

	// We delete the service first to close all incoming connections.
	lp.Info("Starting finding and deleting service resources of BASELINE variant")
	if err := deleteResources(ctx, ag, serviceKeys, lp); err != nil {
		return err
	}

	// Next, delete all workloads.
	lp.Info("Starting finding and deleting workload resources of BASELINE variant")
	if err := deleteResources(ctx, ag, workloadKeys, lp); err != nil {
		return err
	}

	return nil
}

// ---- canary.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"fmt"
	"strings"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

const (
	// Metadata key under which the comma-joined resource keys rolled out for
	// the CANARY variant are stored, so ensureCanaryClean can find them.
	addedCanaryResourcesMetadataKey = "canary-resources"
)

// ensureCanaryRollout rolls out a CANARY variant built from the manifests of
// the triggered commit (optionally patched), recording the created resource
// keys in shared deployment metadata for later cleanup.
func (e *deployExecutor) ensureCanaryRollout(ctx context.Context) model.StageStatus {
	var (
		options       = e.StageConfig.K8sCanaryRolloutStageOptions
		variantLabel  = e.appCfg.VariantLabel.Key
		canaryVariant = e.appCfg.VariantLabel.CanaryValue
	)
	if options == nil {
		e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name)
		return model.StageStatus_STAGE_FAILURE
	}

	// Load the manifests at the triggered commit.
	e.LogPersister.Infof("Loading manifests at commit %s for handling", e.commit)
	manifests, err := loadManifests(
		ctx,
		e.Deployment.ApplicationId,
		e.commit,
		e.AppManifestsCache,
		e.loader,
		e.Logger,
	)
	if err != nil {
		e.LogPersister.Errorf("Failed while loading manifests (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}
	e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests))

	if len(manifests) == 0 {
		e.LogPersister.Error("This application has no Kubernetes manifests to handle")
		return model.StageStatus_STAGE_FAILURE
	}

	// Patches the manifests if needed.
	if len(options.Patches) > 0 {
		e.LogPersister.Info("Patching manifests before generating for CANARY variant")
		manifests, err = patchManifests(manifests, options.Patches, patchManifest)
		if err != nil {
			e.LogPersister.Errorf("Failed while patching manifests (%v)", err)
			return model.StageStatus_STAGE_FAILURE
		}
	}

	// Find and generate workload & service manifests for CANARY variant.
	canaryManifests, err := e.generateCanaryManifests(manifests, *options, variantLabel, canaryVariant)
	if err != nil {
		e.LogPersister.Errorf("Unable to generate manifests for CANARY variant (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}

	// Add builtin annotations for tracking application live state.
	addBuiltinAnnotations(
		canaryManifests,
		variantLabel,
		canaryVariant,
		e.commit,
		e.PipedConfig.PipedID,
		e.Deployment.ApplicationId,
	)

	// Store added resource keys into metadata for cleaning later.
	addedResources := make([]string, 0, len(canaryManifests))
	for _, m := range canaryManifests {
		addedResources = append(addedResources, m.Key.String())
	}
	metadata := strings.Join(addedResources, ",")
	err = e.MetadataStore.Shared().Put(ctx, addedCanaryResourcesMetadataKey, metadata)
	if err != nil {
		e.LogPersister.Errorf("Unable to save deployment metadata (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}

	// Start rolling out the resources for CANARY variant.
	e.LogPersister.Info("Start rolling out CANARY variant...")
	if err := applyManifests(ctx, e.applierGetter, canaryManifests, e.appCfg.Input.Namespace, e.LogPersister); err != nil {
		return model.StageStatus_STAGE_FAILURE
	}

	e.LogPersister.Success("Successfully rolled out CANARY variant")
	return model.StageStatus_STAGE_SUCCESS
}

// ensureCanaryClean deletes the CANARY resources whose keys were recorded in
// shared metadata by ensureCanaryRollout.
func (e *deployExecutor) ensureCanaryClean(ctx context.Context) model.StageStatus {
	value, ok := e.MetadataStore.Shared().Get(addedCanaryResourcesMetadataKey)
	if !ok {
		e.LogPersister.Error("Unable to determine the applied CANARY resources")
		return model.StageStatus_STAGE_FAILURE
	}

	// The metadata value is the comma-joined list written by the rollout stage.
	resources := strings.Split(value, ",")
	if err := removeCanaryResources(ctx, e.applierGetter, resources, e.LogPersister); err != nil {
		e.LogPersister.Errorf("Unable to remove canary resources: %v", err)
		return model.StageStatus_STAGE_FAILURE
	}
	return model.StageStatus_STAGE_SUCCESS
}

// generateCanaryManifests derives the CANARY variant manifests (optional
// Service, duplicated ConfigMaps/Secrets, and workloads) from the given
// manifests. The resource name suffix defaults to the variant name unless
// opts.Suffix overrides it.
func (e *deployExecutor) generateCanaryManifests(manifests []provider.Manifest, opts config.K8sCanaryRolloutStageOptions, variantLabel, variant string) ([]provider.Manifest, error) {
	suffix := variant
	if opts.Suffix != "" {
		suffix = opts.Suffix
	}

	workloads := findWorkloadManifests(manifests, e.appCfg.Workloads)
	if len(workloads) == 0 {
		return nil, fmt.Errorf("unable to find any workload manifests for CANARY variant")
	}

	var canaryManifests []provider.Manifest

	// Find service manifests and duplicate them for CANARY variant.
	if opts.CreateService {
		serviceName := e.appCfg.Service.Name
		services := findManifests(provider.KindService, serviceName, manifests)
		if len(services) == 0 {
			return nil, fmt.Errorf("unable to find any service for name=%q", serviceName)
		}
		// Because the loaded manifests are read-only
		// so we duplicate them to avoid updating the shared manifests data in cache.
		services = duplicateManifests(services, "")

		generatedServices, err := generateVariantServiceManifests(services, variantLabel, variant, suffix)
		if err != nil {
			return nil, err
		}
		canaryManifests = append(canaryManifests, generatedServices...)
	}

	// Find config map manifests and duplicate them for CANARY variant.
	configMaps := findConfigMapManifests(manifests)
	canaryConfigMaps := duplicateManifests(configMaps, suffix)
	canaryManifests = append(canaryManifests, canaryConfigMaps...)

	// Find secret manifests and duplicate them for CANARY variant.
	secrets := findSecretManifests(manifests)
	canarySecrets := duplicateManifests(secrets, suffix)
	canaryManifests = append(canaryManifests, canarySecrets...)

	// Generate new workload manifests for CANARY variant.
	// The generated ones will mount to the new ConfigMaps and Secrets.
	replicasCalculator := func(cur *int32) int32 {
		if cur == nil {
			return 1
		}
		num := opts.Replicas.Calculate(int(*cur), 1)
		return int32(num)
	}
	// We don't need to duplicate the workload manifests
	// because generateVariantWorkloadManifests function is already making a duplicate while decoding.
	// workloads = duplicateManifests(workloads, suffix)
	generatedWorkloads, err := generateVariantWorkloadManifests(workloads, configMaps, secrets, variantLabel, variant, suffix, replicasCalculator)
	if err != nil {
		return nil, err
	}
	canaryManifests = append(canaryManifests, generatedWorkloads...)

	return canaryManifests, nil
}

// removeCanaryResources deletes the given encoded resource keys, services
// first (to cut incoming traffic) and then workloads. Undecodable keys are
// logged and skipped.
func removeCanaryResources(ctx context.Context, ag applierGetter, resources []string, lp executor.LogPersister) error {
	if len(resources) == 0 {
		return nil
	}

	var (
		workloadKeys = make([]provider.ResourceKey, 0)
		serviceKeys  = make([]provider.ResourceKey, 0)
	)
	for _, r := range resources {
		key, err := provider.DecodeResourceKey(r)
		if err != nil {
			lp.Errorf("Had an error while decoding CANARY resource key: %s, %v", r, err)
			continue
		}
		if key.IsWorkload() {
			workloadKeys = append(workloadKeys, key)
		} else {
			serviceKeys = append(serviceKeys, key)
		}
	}

	// We delete the service first to close all incoming connections.
	lp.Info("Starting finding and deleting service resources of CANARY variant")
	if err := deleteResources(ctx, ag, serviceKeys, lp); err != nil {
		return err
	}

	// Next, delete all workloads.
	lp.Info("Starting finding and deleting workload resources of CANARY variant")
	if err := deleteResources(ctx, ag, workloadKeys, lp); err != nil {
		return err
	}

	return nil
}

// ---- canary_test.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes

import (
	"context"
	"fmt"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest"
	"github.com/pipe-cd/pipecd/pkg/cache"
	"github.com/pipe-cd/pipecd/pkg/cache/cachetest"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// TestEnsureCanaryRollout exercises deployExecutor.ensureCanaryRollout with
// mocked manifest cache, loader, and applier, covering configuration,
// loading, and apply failure paths as well as the success path.
func TestEnsureCanaryRollout(t *testing.T) {
	t.Parallel()

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	appCfg := &config.KubernetesApplicationSpec{
		VariantLabel: config.KubernetesVariantLabel{
			Key:           "pipecd.dev/variant",
			PrimaryValue:  "primary",
			BaselineValue: "baseline",
			CanaryValue:   "canary",
		},
	}
	testcases := []struct {
		name     string
		executor *deployExecutor
		want     model.StageStatus
	}{
		{
			// Missing K8sCanaryRolloutStageOptions must fail the stage.
			name: "malformed configuration",
			want: model.StageStatus_STAGE_FAILURE,
			executor: &deployExecutor{
				Input: executor.Input{
					Deployment: &model.Deployment{
						Trigger: &model.DeploymentTrigger{
							Commit: &model.Commit{},
						},
					},
					Stage:        &model.PipelineStage{},
					LogPersister: &fakeLogPersister{},
					Logger:       zap.NewNop(),
				},
				appCfg: appCfg,
			},
		},
		{
			// Cache miss followed by a loader error must fail the stage.
			name: "failed to load manifest",
			want: model.StageStatus_STAGE_FAILURE,
			executor: &deployExecutor{
				Input: executor.Input{
					Deployment: &model.Deployment{
						Trigger: &model.DeploymentTrigger{
							Commit: &model.Commit{},
						},
					},
					LogPersister: &fakeLogPersister{},
					Stage:        &model.PipelineStage{},
					StageConfig: config.PipelineStage{
						K8sCanaryRolloutStageOptions: &config.K8sCanaryRolloutStageOptions{},
					},
					AppManifestsCache: func() cache.Cache {
						c := cachetest.NewMockCache(ctrl)
						c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found"))
						return c
					}(),
					Logger: zap.NewNop(),
				},
				loader: func() provider.Loader {
					p := kubernetestest.NewMockLoader(ctrl)
					p.EXPECT().LoadManifests(gomock.Any()).Return(nil, fmt.Errorf("error"))
					return p
				}(),
				appCfg: appCfg,
			},
		},
		{
			// A successful load that yields zero manifests must fail the stage.
			name: "no manifests to handle",
			want: model.StageStatus_STAGE_FAILURE,
			executor: &deployExecutor{
				Input: executor.Input{
					Deployment: &model.Deployment{
						Trigger: &model.DeploymentTrigger{
							Commit: &model.Commit{},
						},
					},
					LogPersister: &fakeLogPersister{},
					Stage:        &model.PipelineStage{},
					StageConfig: config.PipelineStage{
						K8sCanaryRolloutStageOptions: &config.K8sCanaryRolloutStageOptions{},
					},
					AppManifestsCache: func() cache.Cache {
						c := cachetest.NewMockCache(ctrl)
						c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found"))
						c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil)
						return c
					}(),
					Logger: zap.NewNop(),
				},
				loader: func() provider.Loader {
					p := kubernetestest.NewMockLoader(ctrl)
					p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{}, nil)
					return p
				}(),
				appCfg: appCfg,
			},
		},
		{
			// The applier returning an error must fail the stage.
			name: "failed to apply manifests",
			want: model.StageStatus_STAGE_FAILURE,
			executor: &deployExecutor{
				Input: executor.Input{
					Deployment: &model.Deployment{
						Trigger: &model.DeploymentTrigger{
							Commit: &model.Commit{},
						},
					},
					LogPersister: &fakeLogPersister{},
					Stage:        &model.PipelineStage{},
					StageConfig: config.PipelineStage{
						K8sCanaryRolloutStageOptions: &config.K8sCanaryRolloutStageOptions{},
					},
					AppManifestsCache: func() cache.Cache {
						c := cachetest.NewMockCache(ctrl)
						c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found"))
						c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil)
						return c
					}(),
					MetadataStore: &fakeMetadataStore{},
					PipedConfig:   &config.PipedSpec{},
					Logger:        zap.NewNop(),
				},
				loader: func() provider.Loader {
					p := kubernetestest.NewMockLoader(ctrl)
					p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{
						provider.MakeManifest(provider.ResourceKey{
							APIVersion: "apps/v1",
							Kind:       provider.KindDeployment,
						}, &unstructured.Unstructured{
							Object: map[string]interface{}{
								"apiVersion": "apps/v1",
								"kind":       "Deployment",
								"spec": map[string]interface{}{
									"selector": map[string]interface{}{
										"matchLabels": map[string]interface{}{"app": "foo"},
									},
								},
							},
						}),
					}, nil)
					return p
				}(),
				applierGetter: &applierGroup{
					defaultApplier: func() provider.Applier {
						p := kubernetestest.NewMockApplier(ctrl)
						p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(fmt.Errorf("error"))
						return p
					}(),
				},
				appCfg: &config.KubernetesApplicationSpec{},
			},
		},
		{
			// Happy path: manifests load and apply cleanly.
			name: "successfully applying manifests",
			want: model.StageStatus_STAGE_SUCCESS,
			executor: &deployExecutor{
				Input: executor.Input{
					Deployment: &model.Deployment{
						Trigger: &model.DeploymentTrigger{
							Commit: &model.Commit{},
						},
					},
					LogPersister: &fakeLogPersister{},
					Stage:        &model.PipelineStage{},
					StageConfig: config.PipelineStage{
						K8sCanaryRolloutStageOptions: &config.K8sCanaryRolloutStageOptions{},
					},
					AppManifestsCache: func() cache.Cache {
						c := cachetest.NewMockCache(ctrl)
						c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found"))
						c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil)
						return c
					}(),
					MetadataStore: &fakeMetadataStore{},
					PipedConfig:   &config.PipedSpec{},
					Logger:        zap.NewNop(),
				},
				loader: func() provider.Loader {
					p := kubernetestest.NewMockLoader(ctrl)
					p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{
						provider.MakeManifest(provider.ResourceKey{
							APIVersion: "apps/v1",
							Kind:       provider.KindDeployment,
						}, &unstructured.Unstructured{
							Object: map[string]interface{}{
								"apiVersion": "apps/v1",
								"kind":       "Deployment",
								"spec": map[string]interface{}{
									"selector": map[string]interface{}{
										"matchLabels": map[string]interface{}{"app": "foo"},
									},
								},
							},
						}),
					}, nil)
					return p
				}(),
				applierGetter: &applierGroup{
					defaultApplier: func() provider.Applier {
						p := kubernetestest.NewMockApplier(ctrl)
						p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil)
						return p
					}(),
				},
				appCfg: &config.KubernetesApplicationSpec{},
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			got := tc.executor.ensureCanaryRollout(ctx)
			assert.Equal(t, tc.want, got)
		})
	}
}
package kubernetes

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/cache"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
	"github.com/pipe-cd/pipecd/pkg/yamlprocessor"
)

// deployExecutor runs the Kubernetes deployment stages (sync, primary/canary/
// baseline rollouts and cleanups, traffic routing) for one deployment.
type deployExecutor struct {
	executor.Input

	// commit is the triggered commit hash, set at the start of Execute.
	commit string
	// appCfg is the application spec loaded from the target deploy source.
	appCfg *config.KubernetesApplicationSpec

	// loader loads manifests for the triggered commit.
	loader provider.Loader
	// applierGetter routes each resource to the right provider's applier.
	applierGetter applierGetter
}

// registerer is the subset of the executor registry this package needs.
type registerer interface {
	Register(stage model.Stage, f executor.Factory) error
	RegisterRollback(kind model.RollbackKind, f executor.Factory) error
}

// Register registers this executor factory into a given registerer.
func Register(r registerer) {
	f := func(in executor.Input) executor.Executor {
		return &deployExecutor{
			Input: in,
		}
	}

	r.Register(model.StageK8sSync, f)
	r.Register(model.StageK8sPrimaryRollout, f)
	r.Register(model.StageK8sCanaryRollout, f)
	r.Register(model.StageK8sCanaryClean, f)
	r.Register(model.StageK8sBaselineRollout, f)
	r.Register(model.StageK8sBaselineClean, f)
	r.Register(model.StageK8sTrafficRouting, f)

	// Rollback uses a dedicated executor type.
	r.RegisterRollback(model.RollbackKind_Rollback_KUBERNETES, func(in executor.Input) executor.Executor {
		return &rollbackExecutor{
			Input: in,
		}
	})
}

// Execute prepares the deploy source, application config, applier group and
// manifest loader, then dispatches to the stage-specific ensure* handler.
// The final status also accounts for a received stop signal.
func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus {
	ctx := sig.Context()
	e.commit = e.Deployment.Trigger.Commit.Hash

	ds, err := e.TargetDSP.Get(ctx, e.LogPersister)
	if err != nil {
		e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err)
		return model.StageStatus_STAGE_FAILURE
	}

	e.appCfg = ds.ApplicationConfig.KubernetesApplicationSpec
	if e.appCfg == nil {
		e.LogPersister.Error("Malformed application configuration: missing KubernetesApplicationSpec")
		return model.StageStatus_STAGE_FAILURE
	}

	// Propagate the piped-level insecure flag to the chart repository setting.
	if e.appCfg.Input.HelmChart != nil {
		chartRepoName := e.appCfg.Input.HelmChart.Repository
		if chartRepoName != "" {
			e.appCfg.Input.HelmChart.Insecure = e.PipedConfig.IsInsecureChartRepository(chartRepoName)
		}
	}

	if e.appCfg.Input.KubectlVersion != "" {
		e.LogPersister.Infof("kubectl version %s will be used.", e.appCfg.Input.KubectlVersion)
	}

	e.applierGetter, err = newApplierGroup(e.Deployment.PlatformProvider, *e.appCfg, e.PipedConfig, e.Logger)
	if err != nil {
		e.LogPersister.Error(err.Error())
		return model.StageStatus_STAGE_FAILURE
	}

	e.loader = provider.NewLoader(
		e.Deployment.ApplicationName,
		ds.AppDir,
		ds.RepoDir,
		e.Deployment.GitPath.ConfigFilename,
		e.appCfg.Input,
		e.GitClient,
		e.Logger,
	)

	e.Logger.Info("start executing kubernetes stage",
		zap.String("stage-name", e.Stage.Name),
		zap.String("app-dir", ds.AppDir),
	)

	var (
		originalStatus = e.Stage.Status
		status         model.StageStatus
	)

	switch model.Stage(e.Stage.Name) {
	case model.StageK8sSync:
		status = e.ensureSync(ctx)

	case model.StageK8sPrimaryRollout:
		status = e.ensurePrimaryRollout(ctx)

	case model.StageK8sCanaryRollout:
		status = e.ensureCanaryRollout(ctx)

	case model.StageK8sCanaryClean:
		status = e.ensureCanaryClean(ctx)

	case model.StageK8sBaselineRollout:
		status = e.ensureBaselineRollout(ctx)

	case model.StageK8sBaselineClean:
		status = e.ensureBaselineClean(ctx)

	case model.StageK8sTrafficRouting:
		status = e.ensureTrafficRouting(ctx)

	default:
		e.LogPersister.Errorf("Unsupported stage %s for kubernetes application", e.Stage.Name)
		return model.StageStatus_STAGE_FAILURE
	}

	return executor.DetermineStageStatus(sig.Signal(), originalStatus, status)
}

// loadRunningManifests loads the manifests of the last successfully deployed
// (running) commit, going through the shared manifests cache and falling back
// to preparing the running deploy source on a cache miss.
func (e *deployExecutor) loadRunningManifests(ctx context.Context) (manifests []provider.Manifest, err error) {
	commit := e.Deployment.RunningCommitHash
	if commit == "" {
		return nil, fmt.Errorf("unable to determine running commit")
	}

	// The deploy source is only prepared lazily, when the cache misses.
	loader := &manifestsLoadFunc{
		loadFunc: func(ctx context.Context) ([]provider.Manifest, error) {
			ds, err := e.RunningDSP.Get(ctx, e.LogPersister)
			if err != nil {
				e.LogPersister.Errorf("Failed to prepare running deploy source (%v)", err)
				return nil, err
			}

			loader := provider.NewLoader(
				e.Deployment.ApplicationName,
				ds.AppDir,
				ds.RepoDir,
				e.Deployment.GitPath.ConfigFilename,
				e.appCfg.Input,
				e.GitClient,
				e.Logger,
			)
			return loader.LoadManifests(ctx)
		},
	}

	return loadManifests(ctx, e.Deployment.ApplicationId, commit, e.AppManifestsCache, loader, e.Logger)
}

// manifestsLoadFunc adapts a plain function to the provider.Loader interface.
type manifestsLoadFunc struct {
	loadFunc func(context.Context) ([]provider.Manifest, error)
}

// LoadManifests delegates to the wrapped function.
func (l *manifestsLoadFunc) LoadManifests(ctx context.Context) ([]provider.Manifest, error) {
	return l.loadFunc(ctx)
}

// loadManifests returns the manifests for the given app/commit, consulting
// the cache first and delegating to the loader on a miss.
func loadManifests(ctx context.Context, appID, commit string, manifestsCache cache.Cache, loader provider.Loader, logger *zap.Logger) (manifests []provider.Manifest, err error) {
	cache := provider.AppManifestsCache{
		AppID:  appID,
		Cache:  manifestsCache,
		Logger: logger,
	}
	manifests, ok := cache.Get(commit)
	if ok {
		return manifests, nil
	}

	// When the manifests were not in the cache we have to load them.
+ if manifests, err = loader.LoadManifests(ctx); err != nil { + return nil, err + } + cache.Put(commit, manifests) + + return manifests, nil +} + +func addBuiltinAnnotations(manifests []provider.Manifest, variantLabel, variant, hash, pipedID, appID string) { + for i := range manifests { + manifests[i].AddAnnotations(map[string]string{ + provider.LabelManagedBy: provider.ManagedByPiped, + provider.LabelPiped: pipedID, + provider.LabelApplication: appID, + variantLabel: variant, + provider.LabelOriginalAPIVersion: manifests[i].Key.APIVersion, + provider.LabelResourceKey: manifests[i].Key.String(), + provider.LabelCommitHash: hash, + }) + } +} + +func applyManifests(ctx context.Context, ag applierGetter, manifests []provider.Manifest, namespace string, lp executor.LogPersister) error { + if namespace == "" { + lp.Infof("Start applying %d manifests", len(manifests)) + } else { + lp.Infof("Start applying %d manifests to %q namespace", len(manifests), namespace) + } + + for _, m := range manifests { + applier, err := ag.Get(m.Key) + if err != nil { + lp.Error(err.Error()) + return err + } + + annotation := m.GetAnnotations()[provider.LabelSyncReplace] + if annotation != provider.UseReplaceEnabled { + if err := applier.ApplyManifest(ctx, m); err != nil { + lp.Errorf("Failed to apply manifest: %s (%w)", m.Key.ReadableString(), err) + return err + } + lp.Successf("- applied manifest: %s", m.Key.ReadableString()) + continue + } + // Always try to replace first and create if it fails due to resource not found error. + // This is because we cannot know whether resource already exists before executing command. 
+ err = applier.ReplaceManifest(ctx, m) + if errors.Is(err, provider.ErrNotFound) { + lp.Infof("Specified resource does not exist, so create the resource: %s (%w)", m.Key.ReadableString(), err) + err = applier.CreateManifest(ctx, m) + } + if err != nil { + lp.Errorf("Failed to replace or create manifest: %s (%w)", m.Key.ReadableString(), err) + return err + } + lp.Successf("- replaced or created manifest: %s", m.Key.ReadableString()) + + } + lp.Successf("Successfully applied %d manifests", len(manifests)) + return nil +} + +func deleteResources(ctx context.Context, ag applierGetter, resources []provider.ResourceKey, lp executor.LogPersister) error { + resourcesLen := len(resources) + if resourcesLen == 0 { + lp.Info("No resources to delete") + return nil + } + + lp.Infof("Start deleting %d resources", len(resources)) + var deletedCount int + + for _, k := range resources { + applier, err := ag.Get(k) + if err != nil { + lp.Error(err.Error()) + return err + } + + err = applier.Delete(ctx, k) + if err == nil { + lp.Successf("- deleted resource: %s", k.ReadableString()) + deletedCount++ + continue + } + if errors.Is(err, provider.ErrNotFound) { + lp.Infof("- no resource %s to delete", k.ReadableString()) + deletedCount++ + continue + } + lp.Errorf("- unable to delete resource: %s (%v)", k.ReadableString(), err) + } + + if deletedCount < resourcesLen { + lp.Infof("Deleted %d/%d resources", deletedCount, resourcesLen) + return fmt.Errorf("unable to delete %d resources", resourcesLen-deletedCount) + } + + lp.Successf("Successfully deleted %d resources", len(resources)) + return nil +} + +func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + if m.Key.Kind != kind { + continue + } + if name != "" && m.Key.Name != name { + continue + } + out = append(out, m) + } + return out +} + +func findConfigMapManifests(manifests []provider.Manifest) 
[]provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + if !m.Key.IsConfigMap() { + continue + } + out = append(out, m) + } + return out +} + +func findSecretManifests(manifests []provider.Manifest) []provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + if !m.Key.IsSecret() { + continue + } + out = append(out, m) + } + return out +} + +func findWorkloadManifests(manifests []provider.Manifest, refs []config.K8sResourceReference) []provider.Manifest { + if len(refs) == 0 { + return findManifests(provider.KindDeployment, "", manifests) + } + + workloads := make([]provider.Manifest, 0) + for _, ref := range refs { + kind := provider.KindDeployment + if ref.Kind != "" { + kind = ref.Kind + } + ms := findManifests(kind, ref.Name, manifests) + workloads = append(workloads, ms...) + } + return workloads +} + +func duplicateManifests(manifests []provider.Manifest, nameSuffix string) []provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + out = append(out, duplicateManifest(m, nameSuffix)) + } + return out +} + +func duplicateManifest(m provider.Manifest, nameSuffix string) provider.Manifest { + name := makeSuffixedName(m.Key.Name, nameSuffix) + return m.Duplicate(name) +} + +func generateVariantServiceManifests(services []provider.Manifest, variantLabel, variant, nameSuffix string) ([]provider.Manifest, error) { + manifests := make([]provider.Manifest, 0, len(services)) + updateService := func(s *corev1.Service) { + s.Name = makeSuffixedName(s.Name, nameSuffix) + // Currently, we suppose that all generated services should be ClusterIP. + s.Spec.Type = corev1.ServiceTypeClusterIP + // Append the variant label to the selector + // to ensure that the generated service is using only workloads of this variant. 
+ if s.Spec.Selector == nil { + s.Spec.Selector = map[string]string{} + } + s.Spec.Selector[variantLabel] = variant + // Empty all unneeded fields. + s.Spec.ExternalIPs = nil + s.Spec.LoadBalancerIP = "" + s.Spec.LoadBalancerSourceRanges = nil + } + + for _, m := range services { + s := &corev1.Service{} + if err := m.ConvertToStructuredObject(s); err != nil { + return nil, err + } + updateService(s) + manifest, err := provider.ParseFromStructuredObject(s) + if err != nil { + return nil, fmt.Errorf("failed to parse Service object to Manifest: %w", err) + } + manifests = append(manifests, manifest) + } + return manifests, nil +} + +func generateVariantWorkloadManifests(workloads, configmaps, secrets []provider.Manifest, variantLabel, variant, nameSuffix string, replicasCalculator func(*int32) int32) ([]provider.Manifest, error) { + manifests := make([]provider.Manifest, 0, len(workloads)) + + cmNames := make(map[string]struct{}, len(configmaps)) + for i := range configmaps { + cmNames[configmaps[i].Key.Name] = struct{}{} + } + + secretNames := make(map[string]struct{}, len(secrets)) + for i := range secrets { + secretNames[secrets[i].Key.Name] = struct{}{} + } + + updateContainers := func(containers []corev1.Container) { + for _, container := range containers { + for _, env := range container.Env { + if v := env.ValueFrom; v != nil { + if ref := v.ConfigMapKeyRef; ref != nil { + if _, ok := cmNames[ref.Name]; ok { + ref.Name = makeSuffixedName(ref.Name, nameSuffix) + } + } + if ref := v.SecretKeyRef; ref != nil { + if _, ok := secretNames[ref.Name]; ok { + ref.Name = makeSuffixedName(ref.Name, nameSuffix) + } + } + } + } + for _, envFrom := range container.EnvFrom { + if ref := envFrom.ConfigMapRef; ref != nil { + if _, ok := cmNames[ref.Name]; ok { + ref.Name = makeSuffixedName(ref.Name, nameSuffix) + } + } + if ref := envFrom.SecretRef; ref != nil { + if _, ok := secretNames[ref.Name]; ok { + ref.Name = makeSuffixedName(ref.Name, nameSuffix) + } + } + } + } + } + 
+ updatePod := func(pod *corev1.PodTemplateSpec) { + // Add variant labels. + if pod.Labels == nil { + pod.Labels = map[string]string{} + } + pod.Labels[variantLabel] = variant + + // Update volumes to use canary's ConfigMaps and Secrets. + for i := range pod.Spec.Volumes { + if cm := pod.Spec.Volumes[i].ConfigMap; cm != nil { + if _, ok := cmNames[cm.Name]; ok { + cm.Name = makeSuffixedName(cm.Name, nameSuffix) + } + } + if s := pod.Spec.Volumes[i].Secret; s != nil { + if _, ok := secretNames[s.SecretName]; ok { + s.SecretName = makeSuffixedName(s.SecretName, nameSuffix) + } + } + } + + // Update ENV references in containers. + updateContainers(pod.Spec.InitContainers) + updateContainers(pod.Spec.Containers) + } + + updateDeployment := func(d *appsv1.Deployment) { + d.Name = makeSuffixedName(d.Name, nameSuffix) + if replicasCalculator != nil { + replicas := replicasCalculator(d.Spec.Replicas) + d.Spec.Replicas = &replicas + } + d.Spec.Selector = metav1.AddLabelToSelector(d.Spec.Selector, variantLabel, variant) + updatePod(&d.Spec.Template) + } + + for _, m := range workloads { + switch m.Key.Kind { + case provider.KindDeployment: + d := &appsv1.Deployment{} + if err := m.ConvertToStructuredObject(d); err != nil { + return nil, err + } + updateDeployment(d) + manifest, err := provider.ParseFromStructuredObject(d) + if err != nil { + return nil, err + } + manifests = append(manifests, manifest) + + default: + return nil, fmt.Errorf("unsupported workload kind %s", m.Key.Kind) + } + } + + return manifests, nil +} + +func checkVariantSelectorInWorkload(m provider.Manifest, variantLabel, variant string) error { + var ( + matchLabelsFields = []string{"spec", "selector", "matchLabels"} + labelsFields = []string{"spec", "template", "metadata", "labels"} + ) + + matchLabels, err := m.GetNestedStringMap(matchLabelsFields...) 
+ if err != nil { + return err + } + value, ok := matchLabels[variantLabel] + if !ok { + return fmt.Errorf("missing %s key in spec.selector.matchLabels", variantLabel) + } + if value != variant { + return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(matchLabelsFields, ".")) + } + + labels, err := m.GetNestedStringMap(labelsFields...) + if err != nil { + return err + } + value, ok = labels[variantLabel] + if !ok { + return fmt.Errorf("missing %s key in spec.template.metadata.labels", variantLabel) + } + if value != variant { + return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(labelsFields, ".")) + } + + return nil +} + +func ensureVariantSelectorInWorkload(m provider.Manifest, variantLabel, variant string) error { + variantMap := map[string]string{ + variantLabel: variant, + } + if err := m.AddStringMapValues(variantMap, "spec", "selector", "matchLabels"); err != nil { + return err + } + return m.AddStringMapValues(variantMap, "spec", "template", "metadata", "labels") +} + +func makeSuffixedName(name, suffix string) string { + if suffix != "" { + return name + "-" + suffix + } + return name +} + +// annotateConfigHash appends a hash annotation into the workload manifests. +// The hash value is calculated by hashing the content of all configmaps/secrets +// that are referenced by the workload. +// This appending ensures that the workload should be restarted when +// one of its configurations changed. +func annotateConfigHash(manifests []provider.Manifest) error { + if len(manifests) == 0 { + return nil + } + + configMaps := make(map[string]provider.Manifest) + secrets := make(map[string]provider.Manifest) + for _, m := range manifests { + if m.Key.IsConfigMap() { + configMaps[m.Key.Name] = m + continue + } + if m.Key.IsSecret() { + secrets[m.Key.Name] = m + } + } + + // This application is not containing any config manifests + // so nothing to do. 
+ if len(configMaps)+len(secrets) == 0 { + return nil + } + + for _, m := range manifests { + if m.Key.IsDeployment() { + if err := annotateConfigHashToDeployment(m, configMaps, secrets); err != nil { + return err + } + } + + // TODO: Anotate config hash into other workload kinds such as DaemonSet, StatefulSet... + } + + return nil +} + +func annotateConfigHashToDeployment(m provider.Manifest, managedConfigMaps, managedSecrets map[string]provider.Manifest) error { + d := &appsv1.Deployment{} + if err := m.ConvertToStructuredObject(d); err != nil { + return err + } + + configMaps := provider.FindReferencingConfigMapsInDeployment(d) + secrets := provider.FindReferencingSecretsInDeployment(d) + + // The deployment is not referencing any config resources. + if len(configMaps)+len(secrets) == 0 { + return nil + } + + cfgs := make([]provider.Manifest, 0, len(configMaps)+len(secrets)) + for _, cm := range configMaps { + m, ok := managedConfigMaps[cm] + if !ok { + // We do not return error here because the deployment may use + // a config resource that is not managed by PipeCD. + continue + } + cfgs = append(cfgs, m) + } + for _, s := range secrets { + m, ok := managedSecrets[s] + if !ok { + // We do not return error here because the deployment may use + // a config resource that is not managed by PipeCD. 
+ continue + } + cfgs = append(cfgs, m) + } + + if len(cfgs) == 0 { + return nil + } + + hash, err := provider.HashManifests(cfgs) + if err != nil { + return err + } + + m.AddStringMapValues( + map[string]string{ + provider.AnnotationConfigHash: hash, + }, + "spec", + "template", + "metadata", + "annotations", + ) + return nil +} + +type patcher func(m provider.Manifest, cfg config.K8sResourcePatch) (*provider.Manifest, error) + +func patchManifests(manifests []provider.Manifest, patches []config.K8sResourcePatch, patcher patcher) ([]provider.Manifest, error) { + if len(patches) == 0 { + return manifests, nil + } + + out := make([]provider.Manifest, len(manifests)) + copy(out, manifests) + + for _, p := range patches { + target := -1 + for i, m := range out { + if m.Key.Kind != p.Target.Kind { + continue + } + if m.Key.Name != p.Target.Name { + continue + } + target = i + break + } + if target < 0 { + return nil, fmt.Errorf("no manifest matches the given patch: kind=%s, name=%s", p.Target.Kind, p.Target.Name) + } + patched, err := patcher(out[target], p) + if err != nil { + return nil, fmt.Errorf("failed to patch manifest: %s, error: %w", out[target].Key, err) + } + out[target] = *patched + } + + return out, nil +} + +func patchManifest(m provider.Manifest, patch config.K8sResourcePatch) (*provider.Manifest, error) { + if len(patch.Ops) == 0 { + return &m, nil + } + + fullBytes, err := m.YamlBytes() + if err != nil { + return nil, err + } + + process := func(bytes []byte) ([]byte, error) { + p, err := yamlprocessor.NewProcessor(bytes) + if err != nil { + return nil, err + } + + for _, o := range patch.Ops { + switch o.Op { + case config.K8sResourcePatchOpYAMLReplace: + if err := p.ReplaceString(o.Path, o.Value); err != nil { + return nil, fmt.Errorf("failed to replace value at path: %s, error: %w", o.Path, err) + } + default: + // TODO: Support more patch operation for K8sCanaryRolloutStageOptions. 
+ return nil, fmt.Errorf("%s operation is not supported currently", o.Op) + } + } + + return p.Bytes(), nil + } + + buildManifest := func(bytes []byte) (*provider.Manifest, error) { + manifests, err := provider.ParseManifests(string(bytes)) + if err != nil { + return nil, err + } + if len(manifests) != 1 { + return nil, fmt.Errorf("unexpected number of manifests, expected 1, got %d", len(manifests)) + } + return &manifests[0], nil + } + + // When the target is the whole manifest, + // just pass full bytes to process and build a new manifest based on the returned data. + root := patch.Target.DocumentRoot + if root == "" { + out, err := process(fullBytes) + if err != nil { + return nil, err + } + return buildManifest(out) + } + + // When the target is a manifest field specified by documentRoot, + // we have to extract that field value as a string. + p, err := yamlprocessor.NewProcessor(fullBytes) + if err != nil { + return nil, err + } + + v, err := p.GetValue(root) + if err != nil { + return nil, err + } + sv, ok := v.(string) + if !ok { + return nil, fmt.Errorf("the value for the specified root %s must be a string", root) + } + + // And process that field data. + out, err := process([]byte(sv)) + if err != nil { + return nil, err + } + + // Then rewrite the new data into the specified root. + if err := p.ReplaceString(root, string(out)); err != nil { + return nil, err + } + + return buildManifest(p.Bytes()) +} diff --git a/pkg/app/pipedv1/executor/kubernetes/kubernetes_test.go b/pkg/app/pipedv1/executor/kubernetes/kubernetes_test.go new file mode 100644 index 0000000000..855f3d6dec --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/kubernetes_test.go @@ -0,0 +1,1032 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest" + "github.com/pipe-cd/pipecd/pkg/config" +) + +type fakeLogPersister struct{} + +func (l *fakeLogPersister) Write(_ []byte) (int, error) { return 0, nil } +func (l *fakeLogPersister) Info(_ string) {} +func (l *fakeLogPersister) Infof(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Success(_ string) {} +func (l *fakeLogPersister) Successf(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Error(_ string) {} +func (l *fakeLogPersister) Errorf(_ string, _ ...interface{}) {} + +type fakeMetadataStore struct{} + +func (m *fakeMetadataStore) Shared() metadatastore.Store { + return &fakeMetadataSharedStore{} +} + +func (m *fakeMetadataStore) Stage(stageID string) metadatastore.Store { + return &fakeMetadataStageStore{} +} + +type fakeMetadataSharedStore struct{} + +func (m *fakeMetadataSharedStore) Get(_ string) (string, bool) { return "", false } +func (m *fakeMetadataSharedStore) Put(_ context.Context, _, _ string) error { return nil } +func (m *fakeMetadataSharedStore) PutMulti(_ context.Context, _ map[string]string) error { return nil } + +type fakeMetadataStageStore struct{} + +func (m *fakeMetadataStageStore) 
Get(_ string) (string, bool) { return "", false } +func (m *fakeMetadataStageStore) Put(_ context.Context, _, _ string) error { return nil } +func (m *fakeMetadataStageStore) PutMulti(_ context.Context, _ map[string]string) error { return nil } + +func TestGenerateServiceManifests(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifestsFile string + }{ + { + name: "Update selector and change type to ClusterIP", + manifestsFile: "testdata/services.yaml", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifestsFile) + require.NoError(t, err) + require.Equal(t, 2, len(manifests)) + + generatedManifests, err := generateVariantServiceManifests(manifests[:1], "pipecd.dev/variant", "canary-variant", "canary") + require.NoError(t, err) + require.Equal(t, 1, len(generatedManifests)) + + assert.Equal(t, manifests[1], generatedManifests[0]) + }) + } +} + +func TestGenerateVariantWorkloadManifests(t *testing.T) { + t.Parallel() + + const ( + variantLabel = "pipecd.dev/variant" + canaryVariant = "canary-variant" + ) + testcases := []struct { + name string + manifestsFile string + configmapsFile string + secretsFile string + }{ + { + name: "No configmap and secret", + manifestsFile: "testdata/no-config-deployments.yaml", + }, + { + name: "Has configmap and secret", + manifestsFile: "testdata/deployments.yaml", + configmapsFile: "testdata/configmaps.yaml", + secretsFile: "testdata/secrets.yaml", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifestsFile) + require.NoError(t, err) + require.Equal(t, 2, len(manifests)) + + var configmaps, secrets []provider.Manifest + if tc.configmapsFile != "" { + configmaps, err = provider.LoadManifestsFromYAMLFile(tc.configmapsFile) + require.NoError(t, err) + } + if tc.secretsFile != "" { + secrets, err = 
provider.LoadManifestsFromYAMLFile(tc.secretsFile) + require.NoError(t, err) + } + + calculator := func(r *int32) int32 { + return *r - 1 + } + generatedManifests, err := generateVariantWorkloadManifests( + manifests[:1], + configmaps, + secrets, + variantLabel, + canaryVariant, + "canary", + calculator, + ) + require.NoError(t, err) + require.Equal(t, 1, len(generatedManifests)) + + assert.Equal(t, manifests[1], generatedManifests[0]) + }) + } +} + +func TestCheckVariantSelectorInWorkload(t *testing.T) { + t.Parallel() + + const ( + variantLabel = "pipecd.dev/variant" + primaryVariant = "primary" + ) + testcases := []struct { + name string + manifest string + expected error + }{ + { + name: "missing variant in selector", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + expected: fmt.Errorf("missing pipecd.dev/variant key in spec.selector.matchLabels"), + }, + { + name: "missing variant in template labels", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple +`, + expected: fmt.Errorf("missing pipecd.dev/variant key in spec.template.metadata.labels"), + }, + { + name: "wrong variant in selector", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + pipecd.dev/variant: canary + template: + metadata: + labels: + app: simple +`, + expected: fmt.Errorf("require primary but got canary for pipecd.dev/variant key in spec.selector.matchLabels"), + }, + { + name: "wrong variant in temlate labels", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: 
canary +`, + expected: fmt.Errorf("require primary but got canary for pipecd.dev/variant key in spec.template.metadata.labels"), + }, + { + name: "ok", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary +`, + expected: nil, + }, + } + + expected := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary +` + generatedManifests, err := provider.ParseManifests(expected) + require.NoError(t, err) + require.Equal(t, 1, len(generatedManifests)) + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.ParseManifests(tc.manifest) + require.NoError(t, err) + require.Equal(t, 1, len(manifests)) + + err = checkVariantSelectorInWorkload(manifests[0], variantLabel, primaryVariant) + assert.Equal(t, tc.expected, err) + + err = ensureVariantSelectorInWorkload(manifests[0], variantLabel, primaryVariant) + assert.NoError(t, err) + assert.Equal(t, generatedManifests[0], manifests[0]) + }) + } + +} + +func TestApplyManifests(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + + testcases := []struct { + name string + applier provider.Applier + manifest string + namespace string + wantErr bool + }{ + + { + name: "unable to apply manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(errors.New("unexpected error")) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: true, + }, + { + name: "unable to replace 
manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ReplaceManifest(gomock.Any(), gomock.Any()).Return(errors.New("unexpected error")) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + annotations: + pipecd.dev/sync-by-replace: "enabled" +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: true, + }, + { + name: "unable to create manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ReplaceManifest(gomock.Any(), gomock.Any()).Return(provider.ErrNotFound) + p.EXPECT().CreateManifest(gomock.Any(), gomock.Any()).Return(errors.New("unexpected error")) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + annotations: + pipecd.dev/sync-by-replace: "enabled" +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: true, + }, + { + name: "successfully apply manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: false, + }, + { + name: "successfully replace manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ReplaceManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + annotations: + pipecd.dev/sync-by-replace: "enabled" +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: false, + }, + { + name: 
"successfully create manifest", + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ReplaceManifest(gomock.Any(), gomock.Any()).Return(provider.ErrNotFound) + p.EXPECT().CreateManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + annotations: + pipecd.dev/sync-by-replace: "enabled" +spec: + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple +`, + namespace: "", + wantErr: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + manifests, err := provider.ParseManifests(tc.manifest) + require.NoError(t, err) + ag := &applierGroup{defaultApplier: tc.applier} + err = applyManifests(ctx, ag, manifests, tc.namespace, &fakeLogPersister{}) + assert.Equal(t, tc.wantErr, err != nil) + }) + } +} + +func TestDeleteResources(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + + testcases := []struct { + name string + applier provider.Applier + resources []provider.ResourceKey + wantErr bool + }{ + { + name: "no resource to delete", + wantErr: false, + resources: []provider.ResourceKey{}, + }, + { + name: "not found resource to delete", + wantErr: false, + resources: []provider.ResourceKey{ + { + Name: "foo", + }, + }, + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(provider.ErrNotFound) + return p + }(), + }, + { + name: "unable to delete", + wantErr: true, + resources: []provider.ResourceKey{ + { + Name: "foo", + }, + }, + applier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(fmt.Errorf("unexpected error")) + return p + }(), + }, + { + name: "successfully deletion", + wantErr: false, + resources: []provider.ResourceKey{ + { + Name: "foo", + }, + }, + applier: func() 
provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + ag := &applierGroup{defaultApplier: tc.applier} + err := deleteResources(ctx, ag, tc.resources, &fakeLogPersister{}) + assert.Equal(t, tc.wantErr, err != nil) + }) + } +} + +func TestAnnotateConfigHash(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + expected string + expectedError error + }{ + { + name: "empty list", + }, + { + name: "one config", + manifests: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + configMap: + name: canary-by-config-change +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: canary-by-config-change +data: + two: "2" +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + annotations: + pipecd.dev/config-hash: 75c9m2btb6 + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + 
volumes: + - name: config + configMap: + name: canary-by-config-change +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: canary-by-config-change +data: + two: "2" +`, + }, + { + name: "multiple configs", + manifests: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + configMap: + name: canary-by-config-change + - name: secret + secret: + secretName: secret-1 + - name: unmanaged-config + configMap: + name: unmanaged-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: canary-by-config-change +data: + two: "2" +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-1 +type: my-type +data: + "one": "Mg==" +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + annotations: + pipecd.dev/config-hash: t7dtkdm455 + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + configMap: + name: canary-by-config-change + - name: secret + secret: + secretName: secret-1 + - name: unmanaged-config + configMap: + name: unmanaged-config +--- +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: canary-by-config-change +data: + two: "2" +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-1 +type: my-type +data: + "one": "Mg==" +`, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.ParseManifests(tc.manifests) + require.NoError(t, err) + + expected, err := provider.ParseManifests(tc.expected) + require.NoError(t, err) + + err = annotateConfigHash(manifests) + assert.Equal(t, expected, manifests) + assert.Equal(t, tc.expectedError, err) + }) + } +} + +func TestPatchManifest(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + patch config.K8sResourcePatch + expectedError error + }{ + { + name: "one op", + manifests: "testdata/patch_configmap.yaml", + patch: config.K8sResourcePatch{ + Ops: []config.K8sResourcePatchOp{ + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.data.key1", + Value: "value-1", + }, + }, + }, + }, + { + name: "multi ops", + manifests: "testdata/patch_configmap_multi_ops.yaml", + patch: config.K8sResourcePatch{ + Ops: []config.K8sResourcePatchOp{ + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.data.key1", + Value: "value-1", + }, + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.data.key2", + Value: "value-2", + }, + }, + }, + }, + { + name: "one op with a given field", + manifests: "testdata/patch_configmap_field.yaml", + patch: config.K8sResourcePatch{ + Target: config.K8sResourcePatchTarget{ + DocumentRoot: "$.data.envoy-config", + }, + Ops: []config.K8sResourcePatchOp{ + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.admin.address.socket_address.port_value", + Value: "9096", + }, + }, + }, + }, + { + name: "multi ops with a given field", + manifests: "testdata/patch_configmap_field_multi_ops.yaml", + patch: config.K8sResourcePatch{ + Target: config.K8sResourcePatchTarget{ + DocumentRoot: "$.data.envoy-config", + }, + Ops: []config.K8sResourcePatchOp{ + { + Op: 
config.K8sResourcePatchOpYAMLReplace, + Path: "$.admin.address.socket_address.port_value", + Value: "19095", + }, + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.static_resources.clusters[1].load_assignment.endpoints[0].lb_endpoints[0].endpoint.address.socket_address.port_value", + Value: "19081", + }, + { + Op: config.K8sResourcePatchOpYAMLReplace, + Path: "$.static_resources.clusters[1].type", + Value: "DNS", + }, + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifests) + require.NoError(t, err) + + if tc.expectedError == nil { + require.Equal(t, 2, len(manifests)) + } else { + require.Equal(t, 1, len(manifests)) + } + + got, err := patchManifest(manifests[0], tc.patch) + require.Equal(t, tc.expectedError, err) + + expectedBytes, err := manifests[1].YamlBytes() + require.NoError(t, err) + + gotBytes, err := got.YamlBytes() + require.NoError(t, err) + + if tc.expectedError == nil { + assert.Equal(t, string(expectedBytes), string(gotBytes)) + } + }) + } +} + +func TestPatchManifests(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input []provider.Manifest + patches []config.K8sResourcePatch + expected []provider.Manifest + expectedErr error + }{ + { + name: "no patches", + input: []provider.Manifest{ + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-1", + }, + }, + }, + expected: []provider.Manifest{ + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-1", + }, + }, + }, + }, + { + name: "no manifest for the given patch", + input: []provider.Manifest{ + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-1", + }, + }, + }, + patches: []config.K8sResourcePatch{ + { + Target: config.K8sResourcePatchTarget{ + K8sResourceReference: config.K8sResourceReference{ + Kind: "Deployment", + Name: "deployment-2", + }, + }, + }, + }, + expectedErr: errors.New("no manifest 
matches the given patch: kind=Deployment, name=deployment-2"), + }, + { + name: "multiple patches", + input: []provider.Manifest{ + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-1", + }, + }, + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-2", + }, + }, + { + Key: provider.ResourceKey{ + Kind: "ConfigMap", + Name: "configmap-1", + }, + }, + }, + patches: []config.K8sResourcePatch{ + { + Target: config.K8sResourcePatchTarget{ + K8sResourceReference: config.K8sResourceReference{ + Kind: "ConfigMap", + Name: "configmap-1", + }, + }, + }, + { + Target: config.K8sResourcePatchTarget{ + K8sResourceReference: config.K8sResourceReference{ + Kind: "Deployment", + Name: "deployment-1", + }, + }, + }, + { + Target: config.K8sResourcePatchTarget{ + K8sResourceReference: config.K8sResourceReference{ + Kind: "ConfigMap", + Name: "configmap-1", + }, + }, + }, + }, + expected: []provider.Manifest{ + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-1", + Namespace: "+", + }, + }, + { + Key: provider.ResourceKey{ + Kind: "Deployment", + Name: "deployment-2", + }, + }, + { + Key: provider.ResourceKey{ + Kind: "ConfigMap", + Name: "configmap-1", + Namespace: "++", + }, + }, + }, + }, + } + + patcher := func(m provider.Manifest, cfg config.K8sResourcePatch) (*provider.Manifest, error) { + out := m + out.Key.Namespace = fmt.Sprintf("%s+", out.Key.Namespace) + return &out, nil + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got, err := patchManifests(tc.input, tc.patches, patcher) + assert.Equal(t, tc.expectedErr, err) + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/pkg/app/pipedv1/executor/kubernetes/primary.go b/pkg/app/pipedv1/executor/kubernetes/primary.go new file mode 100644 index 0000000000..c1d95082d7 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/primary.go @@ -0,0 +1,244 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "time" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func (e *deployExecutor) ensurePrimaryRollout(ctx context.Context) model.StageStatus { + var ( + options = e.StageConfig.K8sPrimaryRolloutStageOptions + variantLabel = e.appCfg.VariantLabel.Key + primaryVariant = e.appCfg.VariantLabel.PrimaryValue + ) + if options == nil { + e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + // Load the manifests at the triggered commit. + e.LogPersister.Infof("Loading manifests at trigered commit %s for handling", e.commit) + manifests, err := loadManifests( + ctx, + e.Deployment.ApplicationId, + e.commit, + e.AppManifestsCache, + e.loader, + e.Logger, + ) + if err != nil { + e.LogPersister.Errorf("Failed while loading manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests)) + + var primaryManifests []provider.Manifest + routingMethod := config.DetermineKubernetesTrafficRoutingMethod(e.appCfg.TrafficRouting) + + switch routingMethod { + // In case of routing by Pod selector, + // all manifests can be used as primary manifests. 
+ case config.KubernetesTrafficRoutingMethodPodSelector: + primaryManifests = manifests + + // In case of routing by Istio, + // VirtualService manifest will be used to manipulate the traffic ratio. + // Other manifests can be used as primary manifests. + case config.KubernetesTrafficRoutingMethodIstio: + // Firstly, find the VirtualService manifests. + istioCfg := e.appCfg.TrafficRouting.Istio + if istioCfg == nil { + istioCfg = &config.IstioTrafficRouting{} + } + trafficRoutingManifests, err := findIstioVirtualServiceManifests(manifests, istioCfg.VirtualService) + if err != nil { + e.LogPersister.Errorf("Failed while finding traffic routing manifest: (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + // Then remove them from the list of primary manifests. + if len(trafficRoutingManifests) > 0 { + primaryManifests = make([]provider.Manifest, 0, len(manifests)-1) + for _, m := range manifests { + if m.Key == trafficRoutingManifests[0].Key { + continue + } + primaryManifests = append(primaryManifests, m) + } + } + + default: + e.LogPersister.Errorf("Traffic routing method %v is not supported", routingMethod) + return model.StageStatus_STAGE_FAILURE + } + + // Check if the variant selector is in the workloads. + if !options.AddVariantLabelToSelector && + routingMethod == config.KubernetesTrafficRoutingMethodPodSelector && + e.appCfg.HasStage(model.StageK8sTrafficRouting) { + workloads := findWorkloadManifests(primaryManifests, e.appCfg.Workloads) + var invalid bool + for _, m := range workloads { + if err := checkVariantSelectorInWorkload(m, variantLabel, primaryVariant); err != nil { + invalid = true + e.LogPersister.Errorf("Missing %q in selector of workload %s (%v)", variantLabel+": "+primaryVariant, m.Key.ReadableString(), err) + } + } + if invalid { + return model.StageStatus_STAGE_FAILURE + } + } + + // Generate the manifests for applying. 
+ e.LogPersister.Info("Start generating manifests for PRIMARY variant") + if primaryManifests, err = e.generatePrimaryManifests(primaryManifests, *options, variantLabel, primaryVariant); err != nil { + e.LogPersister.Errorf("Unable to generate manifests for PRIMARY variant (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully generated %d manifests for PRIMARY variant", len(primaryManifests)) + + // Add builtin annotations for tracking application live state. + addBuiltinAnnotations( + primaryManifests, + variantLabel, + primaryVariant, + e.commit, + e.PipedConfig.PipedID, + e.Deployment.ApplicationId, + ) + + // Add config-hash annotation to the workloads. + if err := annotateConfigHash(primaryManifests); err != nil { + e.LogPersister.Errorf("Unable to set %q annotation into the workload manifest (%v)", provider.AnnotationConfigHash, err) + return model.StageStatus_STAGE_FAILURE + } + + // Start applying all manifests to add or update running resources. + e.LogPersister.Info("Start rolling out PRIMARY variant...") + if err := applyManifests(ctx, e.applierGetter, primaryManifests, e.appCfg.Input.Namespace, e.LogPersister); err != nil { + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Success("Successfully rolled out PRIMARY variant") + + if !options.Prune { + e.LogPersister.Info("Resource GC was skipped because sync.prune was not configured") + return model.StageStatus_STAGE_SUCCESS + } + + // Wait for all applied manifests to be stable. + // In theory, we don't need to wait for them to be stable before going to the next step + // but waiting for a while reduces the number of Kubernetes changes in a short time. + e.LogPersister.Info("Waiting for the applied manifests to be stable") + select { + case <-time.After(15 * time.Second): + break + case <-ctx.Done(): + break + } + + // Find the running resources that are not defined in Git. 
+ e.LogPersister.Info("Start finding all running PRIMARY resources but no longer defined in Git") + runningManifests, err := e.loadRunningManifests(ctx) + if err != nil { + e.LogPersister.Errorf("Failed while loading running manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully loaded %d live resources", len(runningManifests)) + for _, m := range runningManifests { + e.LogPersister.Successf("- loaded live resource: %s", m.Key.ReadableString()) + } + + removeKeys := findRemoveManifests(runningManifests, manifests, e.appCfg.Input.Namespace) + if len(removeKeys) == 0 { + e.LogPersister.Info("There are no live resources should be removed") + return model.StageStatus_STAGE_SUCCESS + } + e.LogPersister.Infof("Found %d live resources that are no longer defined in Git", len(removeKeys)) + + // Start deleting all running resources that are not defined in Git. + e.LogPersister.Infof("Start deleting %d resources", len(removeKeys)) + if err := deleteResources(ctx, e.applierGetter, removeKeys, e.LogPersister); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func findRemoveManifests(prevs []provider.Manifest, curs []provider.Manifest, namespace string) []provider.ResourceKey { + var ( + keys = make(map[provider.ResourceKey]struct{}, len(curs)) + removeKeys = make([]provider.ResourceKey, 0) + ) + for _, m := range curs { + keys[m.Key] = struct{}{} + } + for _, m := range prevs { + key := m.Key + if _, ok := keys[key]; ok { + continue + } + if key.Namespace == "" { + key.Namespace = namespace + } + removeKeys = append(removeKeys, key) + } + return removeKeys +} + +func (e *deployExecutor) generatePrimaryManifests(manifests []provider.Manifest, opts config.K8sPrimaryRolloutStageOptions, variantLabel, variant string) ([]provider.Manifest, error) { + suffix := variant + if opts.Suffix != "" { + suffix = opts.Suffix + } + + // Because the loaded manifests are read-only + // 
we duplicate them to avoid updating the shared manifests data in cache. + manifests = duplicateManifests(manifests, "") + + // When addVariantLabelToSelector is true, ensure that all workloads + // have the variant label in their selector. + if opts.AddVariantLabelToSelector { + workloads := findWorkloadManifests(manifests, e.appCfg.Workloads) + for _, m := range workloads { + if err := ensureVariantSelectorInWorkload(m, variantLabel, variant); err != nil { + return nil, fmt.Errorf("unable to check/set %q in selector of workload %s (%w)", variantLabel+": "+variant, m.Key.ReadableString(), err) + } + } + } + + // Find service manifests and duplicate them for PRIMARY variant. + if opts.CreateService { + serviceName := e.appCfg.Service.Name + services := findManifests(provider.KindService, serviceName, manifests) + if len(services) == 0 { + return nil, fmt.Errorf("unable to find any service for name=%q", serviceName) + } + services = duplicateManifests(services, "") + + generatedServices, err := generateVariantServiceManifests(services, variantLabel, variant, suffix) + if err != nil { + return nil, err + } + manifests = append(manifests, generatedServices...) + } + + return manifests, nil +} diff --git a/pkg/app/pipedv1/executor/kubernetes/primary_test.go b/pkg/app/pipedv1/executor/kubernetes/primary_test.go new file mode 100644 index 0000000000..d44bea2da9 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/primary_test.go @@ -0,0 +1,425 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/cache/cachetest" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestEnsurePrimaryRollout(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + appCfg := &config.KubernetesApplicationSpec{ + VariantLabel: config.KubernetesVariantLabel{ + Key: "pipecd.dev/variant", + PrimaryValue: "primary", + BaselineValue: "baseline", + CanaryValue: "canary", + }, + } + testcases := []struct { + name string + executor *deployExecutor + want model.StageStatus + }{ + { + name: "malformed configuration", + want: model.StageStatus_STAGE_FAILURE, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + Stage: &model.PipelineStage{}, + LogPersister: &fakeLogPersister{}, + Logger: zap.NewNop(), + }, + appCfg: appCfg, + }, + }, + { + name: "failed to load manifest", + want: model.StageStatus_STAGE_FAILURE, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + LogPersister: &fakeLogPersister{}, + Stage: &model.PipelineStage{}, + StageConfig: config.PipelineStage{ + K8sPrimaryRolloutStageOptions: &config.K8sPrimaryRolloutStageOptions{}, + }, + AppManifestsCache: func() cache.Cache { + c 
:= cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return(nil, fmt.Errorf("error")) + return p + }(), + appCfg: appCfg, + }, + }, + { + name: "successfully apply a manifest", + want: model.StageStatus_STAGE_SUCCESS, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + Stage: &model.PipelineStage{}, + StageConfig: config.PipelineStage{ + K8sPrimaryRolloutStageOptions: &config.K8sPrimaryRolloutStageOptions{ + AddVariantLabelToSelector: true, + }, + }, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + applierGetter: &applierGroup{ + defaultApplier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + }, + appCfg: &config.KubernetesApplicationSpec{}, + }, + }, + { + name: "successfully apply two manifests", + want: model.StageStatus_STAGE_SUCCESS, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + 
}, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + Stage: &model.PipelineStage{}, + StageConfig: config.PipelineStage{ + K8sPrimaryRolloutStageOptions: &config.K8sPrimaryRolloutStageOptions{ + AddVariantLabelToSelector: true, + }, + }, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindService, + Name: "foo", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + applierGetter: &applierGroup{ + defaultApplier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + }, + appCfg: &config.KubernetesApplicationSpec{ + Service: config.K8sResourceReference{ + Kind: "Service", + Name: "foo", + }, + }, + }, + }, + { + name: "filter out VirtualService", + want: model.StageStatus_STAGE_SUCCESS, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + Stage: &model.PipelineStage{}, + StageConfig: config.PipelineStage{ + K8sPrimaryRolloutStageOptions: &config.K8sPrimaryRolloutStageOptions{ + 
AddVariantLabelToSelector: true, + }, + }, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: "VirtualService", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + appCfg: &config.KubernetesApplicationSpec{ + TrafficRouting: &config.KubernetesTrafficRouting{ + Method: config.KubernetesTrafficRoutingMethodIstio, + }, + }, + }, + }, + { + name: "lack of variant label", + want: model.StageStatus_STAGE_FAILURE, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + Stage: &model.PipelineStage{}, + StageConfig: config.PipelineStage{ + K8sPrimaryRolloutStageOptions: &config.K8sPrimaryRolloutStageOptions{ + AddVariantLabelToSelector: false, + }, + }, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + appCfg: 
&config.KubernetesApplicationSpec{ + GenericApplicationSpec: config.GenericApplicationSpec{ + Pipeline: &config.DeploymentPipeline{ + Stages: []config.PipelineStage{ + { + Name: model.StageK8sTrafficRouting, + }, + }, + }, + }, + TrafficRouting: &config.KubernetesTrafficRouting{ + Method: config.KubernetesTrafficRoutingMethodPodSelector, + }, + }, + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + + ctx := context.Background() + got := tc.executor.ensurePrimaryRollout(ctx) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestFindRemoveManifests(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + prevs []provider.Manifest + curs []provider.Manifest + namespace string + want []provider.ResourceKey + }{ + { + name: "no resource removed", + prevs: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + curs: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + want: []provider.ResourceKey{}, + }, + { + name: "one resource removed", + prevs: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + curs: []provider.Manifest{}, + want: []provider.ResourceKey{ + { + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + { + name: "one resource removed with specified namespace", + prevs: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + curs: []provider.Manifest{}, + namespace: "namespace", + want: []provider.ResourceKey{ + { + APIVersion: "v1", + Kind: "Service", + Namespace: "namespace", + Name: "foo", + }, + }, + }, + { + name: "give namespace different from running one", + prevs: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Namespace: "namespace", + Name: "foo", + }, + }, 
+ }, + curs: []provider.Manifest{}, + namespace: "different", + want: []provider.ResourceKey{ + { + APIVersion: "v1", + Kind: "Service", + Namespace: "namespace", + Name: "foo", + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := findRemoveManifests(tc.prevs, tc.curs, tc.namespace) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/executor/kubernetes/rollback.go b/pkg/app/pipedv1/executor/kubernetes/rollback.go new file mode 100644 index 0000000000..bdcdbca360 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/rollback.go @@ -0,0 +1,223 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "encoding/json" + "os" + "os/exec" + "strings" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input + + appDir string +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageRollback: + status = e.ensureRollback(ctx) + case model.StageScriptRunRollback: + status = e.ensureScriptRunRollback(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s for kubernetes application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // There is nothing to do if this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + ds, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + appCfg := ds.ApplicationConfig.KubernetesApplicationSpec + if appCfg == nil { + e.LogPersister.Error("Malformed application configuration: missing KubernetesApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + if appCfg.Input.HelmChart != nil { + chartRepoName := appCfg.Input.HelmChart.Repository + if chartRepoName != "" { + appCfg.Input.HelmChart.Insecure = e.PipedConfig.IsInsecureChartRepository(chartRepoName) + } + } + + e.appDir = ds.AppDir + + loader := provider.NewLoader(e.Deployment.ApplicationName, ds.AppDir, ds.RepoDir, e.Deployment.GitPath.ConfigFilename, appCfg.Input, e.GitClient, e.Logger) + e.Logger.Info("start executing kubernetes stage", + zap.String("stage-name", e.Stage.Name), + zap.String("app-dir", ds.AppDir), + ) + + // Firstly, we reapply all manifests at running commit + // to revert PRIMARY resources and TRAFFIC ROUTING resources. + + // Load the manifests at the specified commit. + e.LogPersister.Infof("Loading manifests at running commit %s for handling", e.Deployment.RunningCommitHash) + manifests, err := loadManifests( + ctx, + e.Deployment.ApplicationId, + e.Deployment.RunningCommitHash, + e.AppManifestsCache, + loader, + e.Logger, + ) + if err != nil { + e.LogPersister.Errorf("Failed while loading running manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests)) + + // Because the loaded manifests are read-only + // we duplicate them to avoid updating the shared manifests data in cache. + manifests = duplicateManifests(manifests, "") + + // When addVariantLabelToSelector is true, ensure that all workloads + // have the variant label in their selector. 
+ var ( + variantLabel = appCfg.VariantLabel.Key + primaryVariant = appCfg.VariantLabel.PrimaryValue + ) + if appCfg.QuickSync.AddVariantLabelToSelector { + workloads := findWorkloadManifests(manifests, appCfg.Workloads) + for _, m := range workloads { + if err := ensureVariantSelectorInWorkload(m, variantLabel, primaryVariant); err != nil { + e.LogPersister.Errorf("Unable to check/set %q in selector of workload %s (%v)", variantLabel+": "+primaryVariant, m.Key.ReadableString(), err) + return model.StageStatus_STAGE_FAILURE + } + } + } + + // Add builtin annotations for tracking application live state. + addBuiltinAnnotations( + manifests, + variantLabel, + primaryVariant, + e.Deployment.RunningCommitHash, + e.PipedConfig.PipedID, + e.Deployment.ApplicationId, + ) + + // Add config-hash annotation to the workloads. + if err := annotateConfigHash(manifests); err != nil { + e.LogPersister.Errorf("Unable to set %q annotation into the workload manifest (%v)", provider.AnnotationConfigHash, err) + return model.StageStatus_STAGE_FAILURE + } + + ag, err := newApplierGroup(e.Deployment.PlatformProvider, *appCfg, e.PipedConfig, e.Logger) + if err != nil { + e.LogPersister.Error(err.Error()) + return model.StageStatus_STAGE_FAILURE + } + + // Start applying all manifests to add or update running resources. + if err := applyManifests(ctx, ag, manifests, appCfg.Input.Namespace, e.LogPersister); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + var errs []error + + // Next we delete all resources of CANARY variant. + e.LogPersister.Info("Start checking to ensure that the CANARY variant should be removed") + if value, ok := e.MetadataStore.Shared().Get(addedCanaryResourcesMetadataKey); ok { + resources := strings.Split(value, ",") + if err := removeCanaryResources(ctx, ag, resources, e.LogPersister); err != nil { + errs = append(errs, err) + } + } + + // Then delete all resources of BASELINE variant. 
+	e.LogPersister.Info("Start checking to ensure that the BASELINE variant should be removed")
+	if value, ok := e.MetadataStore.Shared().Get(addedBaselineResourcesMetadataKey); ok {
+		resources := strings.Split(value, ",")
+		if err := removeBaselineResources(ctx, ag, resources, e.LogPersister); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return model.StageStatus_STAGE_FAILURE
+	}
+	return model.StageStatus_STAGE_SUCCESS
+}
+
+func (e *rollbackExecutor) ensureScriptRunRollback(ctx context.Context) model.StageStatus {
+	e.LogPersister.Info("Running commands for rollback...")
+
+	onRollback, ok := e.Stage.Metadata["onRollback"]
+	if !ok {
+		e.LogPersister.Error("onRollback metadata is missing")
+		return model.StageStatus_STAGE_FAILURE
+	}
+
+	if onRollback == "" {
+		e.LogPersister.Info("No commands to run")
+		return model.StageStatus_STAGE_SUCCESS
+	}
+
+	envStr, ok := e.Stage.Metadata["env"]
+	env := make(map[string]string, 0)
+	if ok {
+		_ = json.Unmarshal([]byte(envStr), &env)
+	}
+
+	for _, v := range strings.Split(onRollback, "\n") {
+		if v != "" {
+			e.LogPersister.Infof(" %s", v)
+		}
+	}
+
+	envs := make([]string, 0, len(env))
+	for key, value := range env {
+		envs = append(envs, key+"="+value)
+	}
+
+	cmd := exec.Command("/bin/sh", "-l", "-c", onRollback)
+	cmd.Dir = e.appDir
+	cmd.Env = append(os.Environ(), envs...)
+	cmd.Stdout = e.LogPersister
+	cmd.Stderr = e.LogPersister
+	if err := cmd.Run(); err != nil {
+		return model.StageStatus_STAGE_FAILURE
+	}
+	return model.StageStatus_STAGE_SUCCESS
+}
diff --git a/pkg/app/pipedv1/executor/kubernetes/sync.go b/pkg/app/pipedv1/executor/kubernetes/sync.go
new file mode 100644
index 0000000000..16ef7eb1c9
--- /dev/null
+++ b/pkg/app/pipedv1/executor/kubernetes/sync.go
@@ -0,0 +1,146 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "time" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func (e *deployExecutor) ensureSync(ctx context.Context) model.StageStatus { + // Load the manifests at the specified commit. + e.LogPersister.Infof("Loading manifests at commit %s for handling", e.commit) + manifests, err := loadManifests( + ctx, + e.Deployment.ApplicationId, + e.commit, + e.AppManifestsCache, + e.loader, + e.Logger, + ) + if err != nil { + e.LogPersister.Errorf("Failed while loading manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests)) + + // Because the loaded manifests are read-only + // we duplicate them to avoid updating the shared manifests data in cache. + manifests = duplicateManifests(manifests, "") + + // When addVariantLabelToSelector is true, ensure that all workloads + // have the variant label in their selector. 
+ var ( + variantLabel = e.appCfg.VariantLabel.Key + primaryVariant = e.appCfg.VariantLabel.PrimaryValue + ) + if e.appCfg.QuickSync.AddVariantLabelToSelector { + workloads := findWorkloadManifests(manifests, e.appCfg.Workloads) + for _, m := range workloads { + if err := ensureVariantSelectorInWorkload(m, variantLabel, primaryVariant); err != nil { + e.LogPersister.Errorf("Unable to check/set %q in selector of workload %s (%v)", variantLabel+": "+primaryVariant, m.Key.ReadableString(), err) + return model.StageStatus_STAGE_FAILURE + } + } + } + + // Add builtin annotations for tracking application live state. + addBuiltinAnnotations( + manifests, + variantLabel, + primaryVariant, + e.commit, + e.PipedConfig.PipedID, + e.Deployment.ApplicationId, + ) + + // Add config-hash annotation to the workloads. + if err := annotateConfigHash(manifests); err != nil { + e.LogPersister.Errorf("Unable to set %q annotation into the workload manifest (%v)", provider.AnnotationConfigHash, err) + return model.StageStatus_STAGE_FAILURE + } + + // Start applying all manifests to add or update running resources. + if err := applyManifests(ctx, e.applierGetter, manifests, e.appCfg.Input.Namespace, e.LogPersister); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + if !e.appCfg.QuickSync.Prune { + e.LogPersister.Info("Resource GC was skipped because sync.prune was not configured") + return model.StageStatus_STAGE_SUCCESS + } + + // Wait for all applied manifests to be stable. + // In theory, we don't need to wait for them to be stable before going to the next step + // but waiting for a while reduces the number of Kubernetes changes in a short time. + e.LogPersister.Info("Waiting for the applied manifests to be stable") + select { + case <-time.After(15 * time.Second): + break + case <-ctx.Done(): + break + } + + // Find the running resources that are not defined in Git for removing. 
+	e.LogPersister.Info("Start finding all running resources but no longer defined in Git")
+	liveResources, ok := e.AppLiveResourceLister.ListKubernetesResources()
+	if !ok {
+		e.LogPersister.Info("There is no data about live resource so no resource will be removed")
+		return model.StageStatus_STAGE_SUCCESS
+	}
+	e.LogPersister.Successf("Successfully loaded %d live resources", len(liveResources))
+	for _, m := range liveResources {
+		e.LogPersister.Successf("- loaded live resource: %s", m.Key.ReadableString())
+	}
+
+	removeKeys := findRemoveResources(manifests, liveResources)
+	if len(removeKeys) == 0 {
+		e.LogPersister.Info("There are no live resources that should be removed")
+		return model.StageStatus_STAGE_SUCCESS
+	}
+	e.LogPersister.Infof("Found %d live resources that are no longer defined in Git", len(removeKeys))
+
+	// Start deleting all running resources that are not defined in Git.
+	if err := deleteResources(ctx, e.applierGetter, removeKeys, e.LogPersister); err != nil {
+		return model.StageStatus_STAGE_FAILURE
+	}
+
+	return model.StageStatus_STAGE_SUCCESS
+}
+
+func findRemoveResources(manifests []provider.Manifest, liveResources []provider.Manifest) []provider.ResourceKey {
+	var (
+		keys       = make(map[provider.ResourceKey]struct{}, len(manifests))
+		removeKeys = make([]provider.ResourceKey, 0)
+	)
+	for _, m := range manifests {
+		key := m.Key
+		key.Namespace = ""
+		keys[key] = struct{}{}
+	}
+	for _, m := range liveResources {
+		key := m.Key
+		key.Namespace = ""
+		if _, ok := keys[key]; ok {
+			continue
+		}
+		key.Namespace = m.Key.Namespace
+		removeKeys = append(removeKeys, key)
+	}
+	return removeKeys
+}
diff --git a/pkg/app/pipedv1/executor/kubernetes/sync_test.go b/pkg/app/pipedv1/executor/kubernetes/sync_test.go
new file mode 100644
index 0000000000..13b136ea23
--- /dev/null
+++ b/pkg/app/pipedv1/executor/kubernetes/sync_test.go
@@ -0,0 +1,257 @@
+// Copyright 2024 The PipeCD Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/cache/cachetest" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestEnsureSync(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testcases := []struct { + name string + executor *deployExecutor + want model.StageStatus + }{ + { + name: "failed to load manifest", + want: model.StageStatus_STAGE_FAILURE, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + LogPersister: &fakeLogPersister{}, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return(nil, 
fmt.Errorf("error")) + return p + }(), + }, + }, + { + name: "unable to apply manifests", + want: model.StageStatus_STAGE_FAILURE, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + applierGetter: &applierGroup{ + defaultApplier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(fmt.Errorf("error")) + return p + }(), + }, + appCfg: &config.KubernetesApplicationSpec{ + QuickSync: config.K8sSyncStageOptions{ + AddVariantLabelToSelector: true, + }, + }, + }, + }, + { + name: "successfully apply manifests", + want: model.StageStatus_STAGE_SUCCESS, + executor: &deployExecutor{ + Input: executor.Input{ + Deployment: &model.Deployment{ + Trigger: &model.DeploymentTrigger{ + Commit: &model.Commit{}, + }, + }, + PipedConfig: &config.PipedSpec{}, + LogPersister: &fakeLogPersister{}, + AppManifestsCache: func() cache.Cache { + c := cachetest.NewMockCache(ctrl) + c.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("not found")) + c.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + return c + }(), + Logger: zap.NewNop(), + }, + loader: func() provider.Loader { + p := 
kubernetestest.NewMockLoader(ctrl) + p.EXPECT().LoadManifests(gomock.Any()).Return([]provider.Manifest{ + provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{}}, + }), + }, nil) + return p + }(), + applierGetter: &applierGroup{ + defaultApplier: func() provider.Applier { + p := kubernetestest.NewMockApplier(ctrl) + p.EXPECT().ApplyManifest(gomock.Any(), gomock.Any()).Return(nil) + return p + }(), + }, + appCfg: &config.KubernetesApplicationSpec{ + QuickSync: config.K8sSyncStageOptions{ + AddVariantLabelToSelector: true, + }, + }, + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + got := tc.executor.ensureSync(ctx) + assert.Equal(t, tc.want, got) + cancel() + }) + } +} + +func TestFindRemoveResources(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests []provider.Manifest + liveResources []provider.Manifest + want []provider.ResourceKey + }{ + { + name: "no resource removed", + manifests: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + liveResources: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + want: []provider.ResourceKey{}, + }, + { + name: "one resource removed", + manifests: []provider.Manifest{}, + liveResources: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + want: []provider.ResourceKey{ + { + APIVersion: "v1", + Kind: "Service", + Name: "foo", + }, + }, + }, + { + name: "don't remove resource running in different namespace from manifests", + manifests: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Namespace: 
"different", + Name: "foo", + }, + }, + }, + liveResources: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: "Service", + Namespace: "namespace", + Name: "foo", + }, + }, + }, + want: []provider.ResourceKey{}, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got := findRemoveResources(tc.manifests, tc.liveResources) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/configmaps.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/configmaps.yaml new file mode 100644 index 0000000000..2317198a95 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/configmaps.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: configmap-name-2 +data: + piped-config.yaml: |- + data \ No newline at end of file diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/deployments.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/deployments.yaml new file mode 100644 index 0000000000..ae8f12cc98 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/deployments.yaml @@ -0,0 +1,172 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + replicas: 10 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + initContainers: + - image: gcr.io/pipecd/init:v0.1.0 + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + env: + - name: CONFIG_ENV + valueFrom: + configMapKeyRef: + key: key + name: configmap-name-2 + - name: SECRET_ENV + valueFrom: + secretKeyRef: + key: key + name: secret-name-1 + envFrom: + - configMapRef: + name: configmap-name-2 + - secretRef: + name: secret-name-1 + containers: + - args: + - server + image: gcr.io/pipecd/helloworld:v0.1.0-73-ge191187 + imagePullPolicy: IfNotPresent + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + env: + - name: CONFIG_ENV + valueFrom: + configMapKeyRef: + key: key + name: configmap-name-2 + 
configMapKeyRef: + key: key2 + name: not-managed-config-map + - name: SECRET_ENV + valueFrom: + secretKeyRef: + key: key + name: secret-name-1 + envFrom: + - configMapRef: + name: configmap-name-2 + - secretRef: + name: secret-name-1 + resources: {} + volumes: + - name: secret-1 + secret: + defaultMode: 256 + secretName: secret-name-1 + - name: secret-2 + secret: + defaultMode: 256 + secretName: secret-name-2 + - configMap: + defaultMode: 420 + name: configmap-name-1 + name: config-1 + - configMap: + defaultMode: 420 + name: configmap-name-2 + name: config-2 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple-canary + creationTimestamp: +spec: + replicas: 9 + selector: + matchLabels: + app: simple + pipecd.dev/variant: canary-variant + strategy: {} + template: + metadata: + creationTimestamp: + labels: + app: simple + pipecd.dev/variant: canary-variant + spec: + initContainers: + - image: gcr.io/pipecd/init:v0.1.0 + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + env: + - name: CONFIG_ENV + valueFrom: + configMapKeyRef: + key: key + name: configmap-name-2-canary + - name: SECRET_ENV + valueFrom: + secretKeyRef: + key: key + name: secret-name-1-canary + envFrom: + - configMapRef: + name: configmap-name-2-canary + - secretRef: + name: secret-name-1-canary + resources: {} + containers: + - args: + - server + image: gcr.io/pipecd/helloworld:v0.1.0-73-ge191187 + imagePullPolicy: IfNotPresent + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + env: + - name: CONFIG_ENV + valueFrom: + configMapKeyRef: + key: key + name: configmap-name-2-canary + configMapKeyRef: + key: key2 + name: not-managed-config-map + - name: SECRET_ENV + valueFrom: + secretKeyRef: + key: key + name: secret-name-1-canary + envFrom: + - configMapRef: + name: configmap-name-2-canary + - secretRef: + name: secret-name-1-canary + resources: {} + volumes: + - name: secret-1 + secret: + defaultMode: 256 + secretName: secret-name-1-canary + - name: 
secret-2 + secret: + defaultMode: 256 + secretName: secret-name-2 + - configMap: + defaultMode: 420 + name: configmap-name-1 + name: config-1 + - configMap: + defaultMode: 420 + name: configmap-name-2-canary + name: config-2 +status: {} diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service-for-editable-routes.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service-for-editable-routes.yaml new file mode 100644 index 0000000000..f41b22e398 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service-for-editable-routes.yaml @@ -0,0 +1,57 @@ +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: helloworld +spec: + hosts: + - helloworld + http: + - name: no-specified-destinations + - name: include-destinations-for-all-variants + route: + - destination: + host: helloworld + subset: primary + weight: 100 + - destination: + host: helloworld + subset: canary + - destination: + host: helloworld + subset: baseline + - name: zero-weights-were-not-specified + route: + - destination: + host: helloworld + subset: primary + weight: 100 + - match: + - headers: + end-user: + exact: jason + ignoreUriCase: true + uri: + prefix: /ratings/v2/ + name: only-primary-destination + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: helloworld + subset: canary + weight: 30 + - destination: + host: helloworld + subset: baseline + weight: 20 + - name: include-destination-to-other-host + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: another-host + weight: 50 diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service.yaml new file mode 100644 index 0000000000..9ae1c57296 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/generated-virtual-service.yaml @@ -0,0 +1,88 @@ 
+apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: helloworld +spec: + hosts: + - helloworld + http: + - name: no-specified-destinations + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: helloworld + subset: canary + weight: 30 + - destination: + host: helloworld + subset: baseline + weight: 20 + - name: include-destinations-for-all-variants + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: helloworld + subset: canary + weight: 30 + - destination: + host: helloworld + subset: baseline + weight: 20 + - name: zero-weights-were-not-specified + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: helloworld + subset: canary + weight: 30 + - destination: + host: helloworld + subset: baseline + weight: 20 + - match: + - headers: + end-user: + exact: jason + ignoreUriCase: true + uri: + prefix: /ratings/v2/ + name: only-primary-destination + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: helloworld + subset: canary + weight: 30 + - destination: + host: helloworld + subset: baseline + weight: 20 + - name: include-destination-to-other-host + route: + - destination: + host: helloworld + subset: primary + weight: 25 + - destination: + host: helloworld + subset: canary + weight: 15 + - destination: + host: helloworld + subset: baseline + weight: 10 + - destination: + host: another-host + weight: 50 diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/no-config-deployments.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/no-config-deployments.yaml new file mode 100644 index 0000000000..4b51cc6d14 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/no-config-deployments.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + replicas: 10 + selector: + matchLabels: + app: simple + template: + 
metadata: + labels: + app: simple + spec: + containers: + - args: + - server + image: gcr.io/pipecd/helloworld:v0.1.0-73-ge191187 + imagePullPolicy: IfNotPresent + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple-canary + creationTimestamp: +spec: + replicas: 9 + selector: + matchLabels: + app: simple + pipecd.dev/variant: canary-variant + strategy: {} + template: + metadata: + creationTimestamp: + labels: + app: simple + pipecd.dev/variant: canary-variant + spec: + containers: + - args: + - server + image: gcr.io/pipecd/helloworld:v0.1.0-73-ge191187 + imagePullPolicy: IfNotPresent + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + resources: {} +status: {} \ No newline at end of file diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap.yaml new file mode 100644 index 0000000000..e6bb2401b5 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + key1: value1 + key2: value2 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + key1: value-1 + key2: value2 diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field.yaml new file mode 100644 index 0000000000..56e406b95c --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field.yaml @@ -0,0 +1,77 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: envoy-config +data: + envoy-config: | + admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 9095 + static_resources: + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9090 + clusters: + - name: grpc-piped-service + http2_protocol_options: {} + connect_timeout: 0.25s + 
type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: grpc-web-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: grpc-api-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: server-http + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: envoy-config +data: + envoy-config: | + admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 9096 + static_resources: + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9090 + clusters: + - name: grpc-piped-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: grpc-web-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: grpc-api-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + - name: server-http + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field_multi_ops.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field_multi_ops.yaml new file mode 100644 index 0000000000..efeb218de7 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_field_multi_ops.yaml @@ -0,0 +1,295 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: envoy-config +data: + envoy-config: | + admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 9095 + static_resources: + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9090 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager 
+ codec_type: AUTO + stat_prefix: ingress_http + access_log: + - name: envoy.access_loggers.stdout + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + http_filters: + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.cors + - name: envoy.filters.http.grpc_stats + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_stats.v3.FilterConfig + stats_for_all_methods: true + enable_upstream_stats: true + - name: envoy.filters.http.router + route_config: + name: local_route + virtual_hosts: + - name: envoy + domains: + - '*' + cors: + allow_origin_string_match: + - exact: http://localhost:9090 + allow_methods: GET, PUT, DELETE, POST, OPTIONS + allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout,authorization + allow_credentials: true + max_age: "1728000" + expose_headers: custom-header-1,grpc-status,grpc-message + routes: + - match: + prefix: /service.pipedservice.PipedService/ + grpc: {} + route: + cluster: grpc-piped-service + - match: + prefix: /service.webservice.WebService/ + grpc: {} + route: + cluster: grpc-web-service + - match: + prefix: /service.apiservice.APIService/ + grpc: {} + route: + cluster: grpc-api-service + - match: + prefix: / + route: + cluster: server-http + transport_socket: + name: envoy.transport_socket.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/test-secret/internal-tls.cert + private_key: + filename: /etc/test-secret/internal-tls.key + alpn_protocols: h2 + clusters: + - name: grpc-piped-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grpc-piped-service + 
endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9080 + track_cluster_stats: + request_response_sizes: true + - name: grpc-web-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grpc-web-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9081 + track_cluster_stats: + request_response_sizes: true + - name: grpc-api-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grpc-api-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9083 + track_cluster_stats: + request_response_sizes: true + - name: server-http + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: server-http + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9082 + track_cluster_stats: + request_response_sizes: true +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: envoy-config +data: + envoy-config: | + admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 19095 + static_resources: + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9090 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: ingress_http + access_log: + - name: envoy.access_loggers.stdout + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + http_filters: + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.cors + - name: 
envoy.filters.http.grpc_stats + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_stats.v3.FilterConfig + stats_for_all_methods: true + enable_upstream_stats: true + - name: envoy.filters.http.router + route_config: + name: local_route + virtual_hosts: + - name: envoy + domains: + - '*' + cors: + allow_origin_string_match: + - exact: http://localhost:9090 + allow_methods: GET, PUT, DELETE, POST, OPTIONS + allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout,authorization + allow_credentials: true + max_age: "1728000" + expose_headers: custom-header-1,grpc-status,grpc-message + routes: + - match: + prefix: /service.pipedservice.PipedService/ + grpc: {} + route: + cluster: grpc-piped-service + - match: + prefix: /service.webservice.WebService/ + grpc: {} + route: + cluster: grpc-web-service + - match: + prefix: /service.apiservice.APIService/ + grpc: {} + route: + cluster: grpc-api-service + - match: + prefix: / + route: + cluster: server-http + transport_socket: + name: envoy.transport_socket.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/test-secret/internal-tls.cert + private_key: + filename: /etc/test-secret/internal-tls.key + alpn_protocols: h2 + clusters: + - name: grpc-piped-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grpc-piped-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9080 + track_cluster_stats: + request_response_sizes: true + - name: grpc-web-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: DNS + lb_policy: ROUND_ROBIN + load_assignment: + 
cluster_name: grpc-web-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 19081 + track_cluster_stats: + request_response_sizes: true + - name: grpc-api-service + http2_protocol_options: {} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grpc-api-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9083 + track_cluster_stats: + request_response_sizes: true + - name: server-http + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: server-http + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: test-server + port_value: 9082 + track_cluster_stats: + request_response_sizes: true diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_multi_ops.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_multi_ops.yaml new file mode 100644 index 0000000000..4bb074df1c --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/patch_configmap_multi_ops.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + key1: value1 + key2: value2 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + key1: value-1 + key2: value-2 diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/secrets.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/secrets.yaml new file mode 100644 index 0000000000..3b60b24148 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/secrets.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-name-1 +type: Opaque +data: + data: foo \ No newline at end of file diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/services.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/services.yaml new file mode 100644 index 0000000000..b6a0323a46 --- /dev/null +++ 
b/pkg/app/pipedv1/executor/kubernetes/testdata/services.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + ports: + - port: 9085 + protocol: TCP + targetPort: 9085 + selector: + app: simple + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: simple-canary + creationTimestamp: +spec: + ports: + - port: 9085 + protocol: TCP + targetPort: 9085 + selector: + app: simple + pipecd.dev/variant: canary-variant + type: ClusterIP +status: + loadBalancer: {} \ No newline at end of file diff --git a/pkg/app/pipedv1/executor/kubernetes/testdata/virtual-service.yaml b/pkg/app/pipedv1/executor/kubernetes/testdata/virtual-service.yaml new file mode 100644 index 0000000000..7cda63d000 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/testdata/virtual-service.yaml @@ -0,0 +1,50 @@ +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: helloworld +spec: + hosts: + - helloworld + http: + - name: no-specified-destinations + - name: include-destinations-for-all-variants + route: + - destination: + host: helloworld + subset: primary + weight: 100 + - destination: + host: helloworld + subset: canary + weight: 0 + - destination: + host: helloworld + subset: baseline + weight: 0 + - name: zero-weights-were-not-specified + route: + - destination: + host: helloworld + subset: primary + weight: 100 + - name: only-primary-destination + match: + - headers: + end-user: + exact: jason + ignoreUriCase: true + uri: + prefix: /ratings/v2/ + route: + - destination: + host: helloworld + subset: primary + - name: include-destination-to-other-host + route: + - destination: + host: helloworld + subset: primary + weight: 50 + - destination: + host: another-host + weight: 50 diff --git a/pkg/app/pipedv1/executor/kubernetes/traffic.go b/pkg/app/pipedv1/executor/kubernetes/traffic.go new file mode 100644 index 0000000000..8fc9775189 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/traffic.go @@ -0,0 +1,433 @@ +// 
Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + "go.uber.org/zap" + istiov1alpha3 "istio.io/api/networking/v1alpha3" + istiov1beta1 "istio.io/api/networking/v1beta1" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + primaryMetadataKey = "primary-percentage" + canaryMetadataKey = "canary-percentage" + baselineMetadataKey = "baseline-percentage" +) + +func (e *deployExecutor) ensureTrafficRouting(ctx context.Context) model.StageStatus { + var ( + commitHash = e.Deployment.Trigger.Commit.Hash + options = e.StageConfig.K8sTrafficRoutingStageOptions + variantLabel = e.appCfg.VariantLabel.Key + primaryVariant = e.appCfg.VariantLabel.PrimaryValue + ) + if options == nil { + e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + method := config.DetermineKubernetesTrafficRoutingMethod(e.appCfg.TrafficRouting) + + // Load the manifests at the triggered commit. 
+ e.LogPersister.Infof("Loading manifests at commit %s for handling", commitHash) + manifests, err := loadManifests( + ctx, + e.Deployment.ApplicationId, + e.commit, + e.AppManifestsCache, + e.loader, + e.Logger, + ) + if err != nil { + e.LogPersister.Errorf("Failed while loading manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests)) + + if len(manifests) == 0 { + e.LogPersister.Error("There are no kubernetes manifests to handle") + return model.StageStatus_STAGE_FAILURE + } + + // Decide traffic routing percentage for all variants. + primaryPercent, canaryPercent, baselinePercent := options.Percentages() + e.saveTrafficRoutingMetadata(ctx, primaryPercent, canaryPercent, baselinePercent) + + // Find traffic routing manifests. + trafficRoutingManifests, err := findTrafficRoutingManifests(manifests, e.appCfg.Service.Name, e.appCfg.TrafficRouting) + if err != nil { + e.LogPersister.Errorf("Failed while finding traffic routing manifest: (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + switch len(trafficRoutingManifests) { + case 1: + break + case 0: + e.LogPersister.Errorf("Unable to find any traffic routing manifests") + return model.StageStatus_STAGE_FAILURE + default: + e.LogPersister.Infof( + "Detected %d traffic routing manifests but only the first one (%s) will be used", + len(trafficRoutingManifests), + trafficRoutingManifests[0].Key.ReadableString(), + ) + } + trafficRoutingManifest := trafficRoutingManifests[0] + + // In case we are routing by PodSelector, the service manifest must contain variantLabel inside its selector. 
+ if method == config.KubernetesTrafficRoutingMethodPodSelector { + if err := checkVariantSelectorInService(trafficRoutingManifest, variantLabel, primaryVariant); err != nil { + e.LogPersister.Errorf("Traffic routing by PodSelector requires %q inside the selector of Service manifest but it was unable to check that field in manifest %s (%v)", + variantLabel+": "+primaryVariant, + trafficRoutingManifest.Key.ReadableString(), + err, + ) + return model.StageStatus_STAGE_FAILURE + } + } + + trafficRoutingManifest, err = e.generateTrafficRoutingManifest( + trafficRoutingManifest, + primaryPercent, + canaryPercent, + baselinePercent, + ) + if err != nil { + e.LogPersister.Errorf("Unable generate traffic routing manifest: (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // Add builtin annotations for tracking application live state. + addBuiltinAnnotations( + []provider.Manifest{trafficRoutingManifest}, + variantLabel, + primaryVariant, + commitHash, + e.PipedConfig.PipedID, + e.Deployment.ApplicationId, + ) + + e.LogPersister.Infof("Start updating traffic routing to be percentages: primary=%d, canary=%d, baseline=%d", + primaryPercent, + canaryPercent, + baselinePercent, + ) + if err := applyManifests(ctx, e.applierGetter, []provider.Manifest{trafficRoutingManifest}, e.appCfg.Input.Namespace, e.LogPersister); err != nil { + return model.StageStatus_STAGE_FAILURE + } + + e.LogPersister.Success("Successfully updated traffic routing") + return model.StageStatus_STAGE_SUCCESS +} + +func findTrafficRoutingManifests(manifests []provider.Manifest, serviceName string, cfg *config.KubernetesTrafficRouting) ([]provider.Manifest, error) { + method := config.DetermineKubernetesTrafficRoutingMethod(cfg) + + switch method { + case config.KubernetesTrafficRoutingMethodPodSelector: + return findManifests(provider.KindService, serviceName, manifests), nil + + case config.KubernetesTrafficRoutingMethodIstio: + istioConfig := cfg.Istio + if istioConfig == nil { + istioConfig = 
&config.IstioTrafficRouting{} + } + return findIstioVirtualServiceManifests(manifests, istioConfig.VirtualService) + + default: + return nil, fmt.Errorf("unsupport traffic routing method %v", method) + } +} + +func (e *deployExecutor) generateTrafficRoutingManifest(manifest provider.Manifest, primaryPercent, canaryPercent, baselinePercent int) (provider.Manifest, error) { + // Because the loaded manifests are read-only + // so we duplicate them to avoid updating the shared manifests data in cache. + manifest = duplicateManifest(manifest, "") + + // When all traffic should be routed to primary variant + // we do not need to change the traffic manifest + // just copy and return the one specified in the target commit. + if primaryPercent == 100 { + return manifest, nil + } + + cfg := e.appCfg.TrafficRouting + if cfg != nil && cfg.Method == config.KubernetesTrafficRoutingMethodIstio { + istioConfig := cfg.Istio + if istioConfig == nil { + istioConfig = &config.IstioTrafficRouting{} + } + + if strings.HasPrefix(manifest.Key.APIVersion, "v1alpha3") { + return e.generateVirtualServiceManifestV1Alpha3(manifest, istioConfig.Host, istioConfig.EditableRoutes, int32(canaryPercent), int32(baselinePercent)) + } + return e.generateVirtualServiceManifest(manifest, istioConfig.Host, istioConfig.EditableRoutes, int32(canaryPercent), int32(baselinePercent)) + } + + // Determine which variant will receive 100% percent of traffic. 
+ var variant string + switch { + case primaryPercent == 100: + variant = e.appCfg.VariantLabel.PrimaryValue + case canaryPercent == 100: + variant = e.appCfg.VariantLabel.CanaryValue + default: + return manifest, fmt.Errorf("traffic routing by pod requires either PRIMARY or CANARY must be 100 (primary=%d, canary=%d)", primaryPercent, canaryPercent) + } + + variantLabel := e.appCfg.VariantLabel.Key + if err := manifest.AddStringMapValues(map[string]string{variantLabel: variant}, "spec", "selector"); err != nil { + return manifest, fmt.Errorf("unable to update selector for service %q because of: %w", manifest.Key.Name, err) + } + + return manifest, nil +} + +func (e *deployExecutor) saveTrafficRoutingMetadata(ctx context.Context, primary, canary, baseline int) { + metadata := map[string]string{ + primaryMetadataKey: strconv.FormatInt(int64(primary), 10), + canaryMetadataKey: strconv.FormatInt(int64(canary), 10), + baselineMetadataKey: strconv.FormatInt(int64(baseline), 10), + } + if err := e.MetadataStore.Stage(e.Stage.Id).PutMulti(ctx, metadata); err != nil { + e.Logger.Error("failed to save traffic routing percentages to metadata", zap.Error(err)) + } +} + +func findIstioVirtualServiceManifests(manifests []provider.Manifest, ref config.K8sResourceReference) ([]provider.Manifest, error) { + const ( + istioNetworkingAPIVersionPrefix = "networking.istio.io/" + istioVirtualServiceKind = "VirtualService" + ) + + if ref.Kind != "" && ref.Kind != istioVirtualServiceKind { + return nil, fmt.Errorf("support only %q kind for VirtualService reference", istioVirtualServiceKind) + } + + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + if !strings.HasPrefix(m.Key.APIVersion, istioNetworkingAPIVersionPrefix) { + continue + } + if m.Key.Kind != istioVirtualServiceKind { + continue + } + if ref.Name != "" && m.Key.Name != ref.Name { + continue + } + out = append(out, m) + } + + return out, nil +} + +func (e *deployExecutor) 
generateVirtualServiceManifest(m provider.Manifest, host string, editableRoutes []string, canaryPercent, baselinePercent int32) (provider.Manifest, error) { + // Because the loaded manifests are read-only + // so we duplicate them to avoid updating the shared manifests data in cache. + m = duplicateManifest(m, "") + + spec, err := m.GetSpec() + if err != nil { + return m, err + } + + vs := istiov1beta1.VirtualService{} + data, err := json.Marshal(spec) + if err != nil { + return m, err + } + if err := json.Unmarshal(data, &vs); err != nil { + return m, err + } + + editableMap := make(map[string]struct{}, len(editableRoutes)) + for _, r := range editableRoutes { + editableMap[r] = struct{}{} + } + + for _, http := range vs.Http { + if len(editableMap) > 0 { + if _, ok := editableMap[http.Name]; !ok { + continue + } + } + + var ( + otherHostWeight int32 + otherHostRoutes = make([]*istiov1beta1.HTTPRouteDestination, 0) + ) + for _, r := range http.Route { + if r.Destination != nil && r.Destination.Host != host { + otherHostWeight += r.Weight + otherHostRoutes = append(otherHostRoutes, r) + } + } + + var ( + variantsWeight = 100 - otherHostWeight + canaryWeight = canaryPercent * variantsWeight / 100 + baselineWeight = baselinePercent * variantsWeight / 100 + primaryWeight = variantsWeight - canaryWeight - baselineWeight + routes = make([]*istiov1beta1.HTTPRouteDestination, 0, len(otherHostRoutes)+3) + ) + + routes = append(routes, &istiov1beta1.HTTPRouteDestination{ + Destination: &istiov1beta1.Destination{ + Host: host, + Subset: e.appCfg.VariantLabel.PrimaryValue, + }, + Weight: primaryWeight, + }) + if canaryWeight > 0 { + routes = append(routes, &istiov1beta1.HTTPRouteDestination{ + Destination: &istiov1beta1.Destination{ + Host: host, + Subset: e.appCfg.VariantLabel.CanaryValue, + }, + Weight: canaryWeight, + }) + } + if baselineWeight > 0 { + routes = append(routes, &istiov1beta1.HTTPRouteDestination{ + Destination: &istiov1beta1.Destination{ + Host: host, + 
Subset: e.appCfg.VariantLabel.BaselineValue, + }, + Weight: baselineWeight, + }) + } + routes = append(routes, otherHostRoutes...) + http.Route = routes + } + + if err := m.SetStructuredSpec(vs); err != nil { + return m, err + } + + return m, nil +} + +func (e *deployExecutor) generateVirtualServiceManifestV1Alpha3(m provider.Manifest, host string, editableRoutes []string, canaryPercent, baselinePercent int32) (provider.Manifest, error) { + // Because the loaded manifests are read-only + // so we duplicate them to avoid updating the shared manifests data in cache. + m = duplicateManifest(m, "") + + spec, err := m.GetSpec() + if err != nil { + return m, err + } + + vs := istiov1alpha3.VirtualService{} + data, err := json.Marshal(spec) + if err != nil { + return m, err + } + if err := json.Unmarshal(data, &vs); err != nil { + return m, err + } + + editableMap := make(map[string]struct{}, len(editableRoutes)) + for _, r := range editableRoutes { + editableMap[r] = struct{}{} + } + + for _, http := range vs.Http { + if len(editableMap) > 0 { + if _, ok := editableMap[http.Name]; !ok { + continue + } + } + + var ( + otherHostWeight int32 + otherHostRoutes = make([]*istiov1alpha3.HTTPRouteDestination, 0) + ) + for _, r := range http.Route { + if r.Destination != nil && r.Destination.Host != host { + otherHostWeight += r.Weight + otherHostRoutes = append(otherHostRoutes, r) + } + } + + var ( + variantsWeight = 100 - otherHostWeight + canaryWeight = canaryPercent * variantsWeight / 100 + baselineWeight = baselinePercent * variantsWeight / 100 + primaryWeight = variantsWeight - canaryWeight - baselineWeight + routes = make([]*istiov1alpha3.HTTPRouteDestination, 0, len(otherHostRoutes)+3) + ) + + routes = append(routes, &istiov1alpha3.HTTPRouteDestination{ + Destination: &istiov1alpha3.Destination{ + Host: host, + Subset: e.appCfg.VariantLabel.PrimaryValue, + }, + Weight: primaryWeight, + }) + if canaryWeight > 0 { + routes = append(routes, 
&istiov1alpha3.HTTPRouteDestination{ + Destination: &istiov1alpha3.Destination{ + Host: host, + Subset: e.appCfg.VariantLabel.CanaryValue, + }, + Weight: canaryWeight, + }) + } + if baselineWeight > 0 { + routes = append(routes, &istiov1alpha3.HTTPRouteDestination{ + Destination: &istiov1alpha3.Destination{ + Host: host, + Subset: e.appCfg.VariantLabel.BaselineValue, + }, + Weight: baselineWeight, + }) + } + routes = append(routes, otherHostRoutes...) + http.Route = routes + } + + if err := m.SetStructuredSpec(vs); err != nil { + return m, err + } + + return m, nil +} + +func checkVariantSelectorInService(m provider.Manifest, variantLabel, variant string) error { + selector, err := m.GetNestedStringMap("spec", "selector") + if err != nil { + return err + } + + value, ok := selector[variantLabel] + if !ok { + return fmt.Errorf("missing %s key in spec.selector", variantLabel) + } + + if value != variant { + return fmt.Errorf("require %s but got %s for %s key in spec.selector", variant, value, variantLabel) + } + return nil +} diff --git a/pkg/app/pipedv1/executor/kubernetes/traffic_test.go b/pkg/app/pipedv1/executor/kubernetes/traffic_test.go new file mode 100644 index 0000000000..96a9cd7752 --- /dev/null +++ b/pkg/app/pipedv1/executor/kubernetes/traffic_test.go @@ -0,0 +1,146 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" +) + +func TestGenerateVirtualServiceManifest(t *testing.T) { + t.Parallel() + + exec := &deployExecutor{ + appCfg: &config.KubernetesApplicationSpec{ + VariantLabel: config.KubernetesVariantLabel{ + Key: "pipecd.dev/variant", + PrimaryValue: "primary", + BaselineValue: "baseline", + CanaryValue: "canary", + }, + }, + } + testcases := []struct { + name string + manifestFile string + editableRoutes []string + expectedFile string + }{ + { + name: "apply all routes", + manifestFile: "testdata/virtual-service.yaml", + expectedFile: "testdata/generated-virtual-service.yaml", + }, + { + name: "apply only speficied routes", + manifestFile: "testdata/virtual-service.yaml", + editableRoutes: []string{"only-primary-destination"}, + expectedFile: "testdata/generated-virtual-service-for-editable-routes.yaml", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifestFile) + require.NoError(t, err) + require.Equal(t, 1, len(manifests)) + + generatedManifest, err := exec.generateVirtualServiceManifest(manifests[0], "helloworld", tc.editableRoutes, 30, 20) + assert.NoError(t, err) + + expectedManifests, err := provider.LoadManifestsFromYAMLFile(tc.expectedFile) + require.NoError(t, err) + require.Equal(t, 1, len(expectedManifests)) + + expected, err := expectedManifests[0].YamlBytes() + require.NoError(t, err) + got, err := generatedManifest.YamlBytes() + require.NoError(t, err) + + assert.EqualValues(t, string(expected), string(got)) + }) + } +} + +func TestCheckVariantSelectorInService(t *testing.T) { + t.Parallel() + + const ( + variantLabel = "pipecd.dev/variant" + primaryVariant = "primary" + ) + testcases := []struct { + name 
string + manifest string + expected error + }{ + { + name: "missing variant selector", + manifest: ` +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple +`, + expected: fmt.Errorf("missing pipecd.dev/variant key in spec.selector"), + }, + { + name: "wrong variant", + manifest: ` +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple + pipecd.dev/variant: canary +`, + expected: fmt.Errorf("require primary but got canary for pipecd.dev/variant key in spec.selector"), + }, + { + name: "ok", + manifest: ` +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple + pipecd.dev/variant: primary +`, + expected: nil, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.ParseManifests(tc.manifest) + require.NoError(t, err) + require.Equal(t, 1, len(manifests)) + + err = checkVariantSelectorInService(manifests[0], variantLabel, primaryVariant) + assert.Equal(t, tc.expected, err) + }) + } +} diff --git a/pkg/app/pipedv1/executor/lambda/deploy.go b/pkg/app/pipedv1/executor/lambda/deploy.go new file mode 100644 index 0000000000..23724fddf7 --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/deploy.go @@ -0,0 +1,130 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lambda + +import ( + "context" + "strconv" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" + + "go.uber.org/zap" +) + +const promotePercentageMetadataKey = "promote-percentage" + +type deployExecutor struct { + executor.Input + + deploySource *deploysource.DeploySource + appCfg *config.LambdaApplicationSpec + platformProviderName string + platformProviderCfg *config.PlatformProviderLambdaConfig +} + +func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus { + ctx := sig.Context() + ds, err := e.TargetDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.deploySource = ds + e.appCfg = ds.ApplicationConfig.LambdaApplicationSpec + if e.appCfg == nil { + e.LogPersister.Errorf("Malformed application configuration: missing LambdaApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + var found bool + e.platformProviderName, e.platformProviderCfg, found = findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + var ( + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageLambdaSync: + status = e.ensureSync(ctx) + case model.StageLambdaPromote: + status = e.ensurePromote(ctx) + case model.StageLambdaCanaryRollout: + status = e.ensureRollout(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s for lambda application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *deployExecutor) ensureSync(ctx context.Context) model.StageStatus { + fm, ok := loadFunctionManifest(&e.Input, e.appCfg.Input.FunctionManifestFile, e.deploySource) + if !ok { + return 
model.StageStatus_STAGE_FAILURE + } + + if !sync(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, fm) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensurePromote(ctx context.Context) model.StageStatus { + options := e.StageConfig.LambdaPromoteStageOptions + if options == nil { + e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + metadata := map[string]string{ + promotePercentageMetadataKey: strconv.FormatInt(int64(options.Percent.Int()), 10), + } + if err := e.MetadataStore.Stage(e.Stage.Id).PutMulti(ctx, metadata); err != nil { + e.Logger.Error("failed to save routing percentages to metadata", zap.Error(err)) + } + + fm, ok := loadFunctionManifest(&e.Input, e.appCfg.Input.FunctionManifestFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + if !promote(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, fm) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensureRollout(ctx context.Context) model.StageStatus { + fm, ok := loadFunctionManifest(&e.Input, e.appCfg.Input.FunctionManifestFile, e.deploySource) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + if !rollout(ctx, &e.Input, e.platformProviderName, e.platformProviderCfg, fm) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/lambda/lambda.go b/pkg/app/pipedv1/executor/lambda/lambda.go new file mode 100644 index 0000000000..4eee4c2368 --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/lambda.go @@ -0,0 +1,407 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda + +import ( + "archive/zip" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/lambda" + "github.com/pipe-cd/pipecd/pkg/backoff" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &deployExecutor{ + Input: in, + } + } + r.Register(model.StageLambdaSync, f) + r.Register(model.StageLambdaPromote, f) + r.Register(model.StageLambdaCanaryRollout, f) + + r.RegisterRollback(model.RollbackKind_Rollback_LAMBDA, func(in executor.Input) executor.Executor { + return &rollbackExecutor{ + Input: in, + } + }) +} + +func findPlatformProvider(in *executor.Input) (name string, cfg *config.PlatformProviderLambdaConfig, found bool) { + name = in.Application.PlatformProvider + if name == "" { + in.LogPersister.Errorf("Missing the PlatformProvider name in the application configuration") + return + } + + cp, ok := in.PipedConfig.FindPlatformProvider(name, model.ApplicationKind_LAMBDA) + if !ok { + in.LogPersister.Errorf("The specified platform provider %q was not found in piped configuration", name) + return + } + + cfg = cp.LambdaConfig + found = true + 
return +} + +func loadFunctionManifest(in *executor.Input, functionManifestFile string, ds *deploysource.DeploySource) (provider.FunctionManifest, bool) { + in.LogPersister.Infof("Loading service manifest at commit %s", ds.Revision) + + fm, err := provider.LoadFunctionManifest(ds.AppDir, functionManifestFile) + if err != nil { + in.LogPersister.Errorf("Failed to load lambda function manifest (%v)", err) + return provider.FunctionManifest{}, false + } + + in.LogPersister.Infof("Successfully loaded the lambda function manifest at commit %s", ds.Revision) + return fm, true +} + +func sync(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderLambdaConfig, fm provider.FunctionManifest) bool { + in.LogPersister.Infof("Start applying the lambda function manifest") + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create Lambda client for the provider %s: %v", platformProviderName, err) + return false + } + + // Build and publish new version of Lambda function. + version, ok := build(ctx, in, client, fm) + if !ok { + in.LogPersister.Errorf("Failed to build new version for Lambda function %s", fm.Spec.Name) + return false + } + + trafficCfg, err := client.GetTrafficConfig(ctx, fm) + // Create Alias on not yet existed. + if errors.Is(err, provider.ErrNotFound) { + if err := client.CreateTrafficConfig(ctx, fm, version); err != nil { + in.LogPersister.Errorf("Failed to create traffic routing for Lambda function %s (version: %s): %v", fm.Spec.Name, version, err) + return false + } + in.LogPersister.Infof("Successfully applied the lambda function manifest") + return true + } + if err != nil { + in.LogPersister.Errorf("Failed to prepare traffic routing for Lambda function %s: %v", fm.Spec.Name, err) + return false + } + // Store the current traffic config for rollback if necessary. 
+ if trafficCfg != nil { + originalTrafficCfg, err := trafficCfg.Encode() + if err != nil { + in.LogPersister.Errorf("Unable to store current traffic config for rollback: encode failed: %v", err) + return false + } + originalTrafficKeyName := fmt.Sprintf("original-traffic-%s", in.Deployment.RunningCommitHash) + if e := in.MetadataStore.Shared().Put(ctx, originalTrafficKeyName, originalTrafficCfg); e != nil { + in.LogPersister.Errorf("Unable to store current traffic config for rollback: %v", e) + return false + } + } + + // Update 100% traffic to the new lambda version. + if !configureTrafficRouting(trafficCfg, version, 100) { + in.LogPersister.Errorf("Failed to prepare traffic routing for Lambda function %s", fm.Spec.Name) + return false + } + + if err = client.UpdateTrafficConfig(ctx, fm, trafficCfg); err != nil { + in.LogPersister.Errorf("Failed to update traffic routing for Lambda function %s (version: %s): %v", fm.Spec.Name, version, err) + return false + } + + in.LogPersister.Infof("Successfully applied the manifest for Lambda function %s version (v%s)", fm.Spec.Name, version) + return true +} + +func rollout(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderLambdaConfig, fm provider.FunctionManifest) bool { + in.LogPersister.Infof("Start rolling out the lambda function: %s", fm.Spec.Name) + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create Lambda client for the provider %s: %v", platformProviderName, err) + return false + } + + // Build and publish new version of Lambda function. 
+ version, ok := build(ctx, in, client, fm) + if !ok { + in.LogPersister.Errorf("Failed to build new version for Lambda function %s", fm.Spec.Name) + return false + } + + // Update rolled out version name to metadata store + rolloutVersionKeyName := fmt.Sprintf("%s-rollout", fm.Spec.Name) + if err := in.MetadataStore.Shared().Put(ctx, rolloutVersionKeyName, version); err != nil { + in.LogPersister.Errorf("Failed to update latest version name to metadata store for Lambda function %s: %v", fm.Spec.Name, err) + return false + } + + // Store current traffic config for rollback if necessary. + if trafficCfg, err := client.GetTrafficConfig(ctx, fm); err == nil { + // Store the current traffic config. + originalTrafficCfg, err := trafficCfg.Encode() + if err != nil { + in.LogPersister.Errorf("Unable to store current traffic config for rollback: encode failed: %v", err) + return false + } + originalTrafficKeyName := fmt.Sprintf("original-traffic-%s", in.Deployment.RunningCommitHash) + if e := in.MetadataStore.Shared().Put(ctx, originalTrafficKeyName, originalTrafficCfg); e != nil { + in.LogPersister.Errorf("Unable to store current traffic config for rollback: %v", e) + return false + } + } + + return true +} + +func promote(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderLambdaConfig, fm provider.FunctionManifest) bool { + in.LogPersister.Infof("Start promote new version of the lambda function: %s", fm.Spec.Name) + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create Lambda client for the provider %s: %v", platformProviderName, err) + return false + } + + rolloutVersionKeyName := fmt.Sprintf("%s-rollout", fm.Spec.Name) + version, ok := in.MetadataStore.Shared().Get(rolloutVersionKeyName) + if !ok { + in.LogPersister.Errorf("Unable to prepare version to promote for Lambda function %s: Not found", 
fm.Spec.Name) + return false + } + + options := in.StageConfig.LambdaPromoteStageOptions + if options == nil { + in.LogPersister.Errorf("Malformed configuration for stage %s", in.Stage.Name) + return false + } + + trafficCfg, err := client.GetTrafficConfig(ctx, fm) + // Create Alias on not yet existed. + if errors.Is(err, provider.ErrNotFound) { + if options.Percent.Int() != 100 { + in.LogPersister.Errorf("Not previous version available to handle traffic, new version has to get 100 percent of traffic") + return false + } + if err := client.CreateTrafficConfig(ctx, fm, version); err != nil { + in.LogPersister.Errorf("Failed to create traffic routing for Lambda function %s (version: %s): %v", fm.Spec.Name, version, err) + return false + } + in.LogPersister.Infof("Successfully route all traffic to the lambda function %s (version %s)", fm.Spec.Name, version) + return true + } + if err != nil { + in.LogPersister.Errorf("Failed to prepare traffic routing for Lambda function %s: %v", fm.Spec.Name, err) + return false + } + + // Update traffic to the new lambda version. + if !configureTrafficRouting(trafficCfg, version, options.Percent.Int()) { + in.LogPersister.Errorf("Failed to prepare traffic routing for Lambda function %s", fm.Spec.Name) + return false + } + + // Store promote traffic config for rollback if necessary. 
+ promoteTrafficCfgData, err := trafficCfg.Encode() + if err != nil { + in.LogPersister.Errorf("Unable to store current traffic config for rollback: encode failed: %v", err) + return false + } + promoteTrafficKeyName := fmt.Sprintf("latest-promote-traffic-%s", in.Deployment.RunningCommitHash) + if err := in.MetadataStore.Shared().Put(ctx, promoteTrafficKeyName, promoteTrafficCfgData); err != nil { + in.LogPersister.Errorf("Unable to store promote traffic config for rollback: %v", err) + return false + } + + if err = client.UpdateTrafficConfig(ctx, fm, trafficCfg); err != nil { + in.LogPersister.Errorf("Failed to update traffic routing for Lambda function %s (version: %s): %v", fm.Spec.Name, version, err) + return false + } + + in.LogPersister.Infof("Successfully promote new version (v%s) of Lambda function %s, it will handle %v percent of traffic", version, fm.Spec.Name, options.Percent) + return true +} + +func configureTrafficRouting(trafficCfg provider.RoutingTrafficConfig, version string, percent int) bool { + // The primary version has to be set on trafficCfg. + primary, ok := trafficCfg[provider.TrafficPrimaryVersionKeyName] + if !ok { + return false + } + // Set built version by rollout stage as new primary. + trafficCfg[provider.TrafficPrimaryVersionKeyName] = provider.VersionTraffic{ + Version: version, + Percent: float64(percent), + } + // Make the current primary version as new secondary version in case it's not the latest built version by rollout stage. + if primary.Version != version { + trafficCfg[provider.TrafficSecondaryVersionKeyName] = provider.VersionTraffic{ + Version: primary.Version, + Percent: float64(100 - percent), + } + } else { + // Update traffic to the secondary and keep it as new secondary. 
+ if secondary, ok := trafficCfg[provider.TrafficSecondaryVersionKeyName]; ok { + trafficCfg[provider.TrafficSecondaryVersionKeyName] = provider.VersionTraffic{ + Version: secondary.Version, + Percent: float64(100 - percent), + } + } + } + return true +} + +func build(ctx context.Context, in *executor.Input, client provider.Client, fm provider.FunctionManifest) (version string, ok bool) { + found, err := client.IsFunctionExist(ctx, fm.Spec.Name) + if err != nil { + in.LogPersister.Errorf("Unable to validate function name %s: %v", fm.Spec.Name, err) + return + } + if found { + if err := updateFunction(ctx, in, client, fm); err != nil { + in.LogPersister.Errorf("Failed to update lambda function %s: %v", fm.Spec.Name, err) + return + } + } else { + if err := createFunction(ctx, in, client, fm); err != nil { + in.LogPersister.Errorf("Failed to create lambda function %s: %v", fm.Spec.Name, err) + return + } + } + + in.LogPersister.Info("Waiting to update lambda function in progress...") + retry := backoff.NewRetry(provider.RequestRetryTime, backoff.NewConstant(provider.RetryIntervalDuration)) + publishFunctionSucceed := false + startWaitingStamp := time.Now() + for retry.WaitNext(ctx) { + // Commit version for applied Lambda function. + // Note: via the current docs of [Lambda.PublishVersion](https://docs.aws.amazon.com/sdk-for-go/api/service/lambda/#Lambda.PublishVersion) + // AWS Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. + // But currently, unchanged revision is able to make publish (versionId++) as usual. 
+ version, err = client.PublishFunction(ctx, fm) + if err != nil { + in.Logger.Error("Failed publish new version for Lambda function") + } else { + publishFunctionSucceed = true + break + } + } + if !publishFunctionSucceed { + in.LogPersister.Errorf("Failed to commit new version for Lambda function %s: %v", fm.Spec.Name, err) + return + } + + in.LogPersister.Infof("Successfully committed new version (v%s) for Lambda function %s after duration %v", version, fm.Spec.Name, time.Since(startWaitingStamp)) + ok = true + return +} + +func createFunction(ctx context.Context, in *executor.Input, client provider.Client, fm provider.FunctionManifest) error { + if fm.Spec.ImageURI != "" || fm.Spec.S3Bucket != "" { + return client.CreateFunction(ctx, fm) + } + + zip, err := prepareZipFromSource(ctx, in.GitClient, fm) + if err != nil { + in.LogPersister.Errorf("Failed to prepare zip from Lambda function source, remote (%s)", fm.Spec.SourceCode.Git) + return err + } + + return client.CreateFunctionFromSource(ctx, fm, zip) +} + +func updateFunction(ctx context.Context, in *executor.Input, client provider.Client, fm provider.FunctionManifest) error { + if fm.Spec.ImageURI != "" || fm.Spec.S3Bucket != "" { + return client.UpdateFunction(ctx, fm) + } + zip, err := prepareZipFromSource(ctx, in.GitClient, fm) + if err != nil { + in.LogPersister.Errorf("Failed to prepare zip from Lambda function source, remote (%s)", fm.Spec.SourceCode.Git) + return err + } + + return client.UpdateFunctionFromSource(ctx, fm, zip) +} + +func prepareZipFromSource(ctx context.Context, gc executor.GitClient, fm provider.FunctionManifest) (io.Reader, error) { + repo, err := gc.Clone(ctx, fm.Spec.SourceCode.Git, fm.Spec.SourceCode.Git, "", "") + if err != nil { + return nil, err + } + defer repo.Clean() + + if err = repo.Checkout(ctx, fm.Spec.SourceCode.Ref); err != nil { + return nil, err + } + + buf := &bytes.Buffer{} + w := zip.NewWriter(buf) + defer w.Close() + + source := filepath.Join(repo.GetPath(), 
fm.Spec.SourceCode.Path) + if err := filepath.Walk(source, func(fp string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(fi) + if err != nil { + return err + } + header.Method = zip.Deflate + header.Name, err = filepath.Rel(filepath.Dir(source), fp) + if err != nil { + return err + } + if fi.IsDir() { + header.Name += "/" + } + headerWriter, err := w.CreateHeader(header) + if err != nil { + return err + } + if fi.IsDir() { + return nil + } + + f, err := os.Open(fp) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(headerWriter, f) + return err + }); err != nil { + return nil, err + } + + return buf, nil +} diff --git a/pkg/app/pipedv1/executor/lambda/lambda_test.go b/pkg/app/pipedv1/executor/lambda/lambda_test.go new file mode 100644 index 0000000000..7178277639 --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/lambda_test.go @@ -0,0 +1,190 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lambda + +import ( + "context" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/lambda" + "github.com/pipe-cd/pipecd/pkg/git" +) + +func TestConfigureTrafficRouting(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + version string + percent int + primary *provider.VersionTraffic + secondary *provider.VersionTraffic + out bool + }{ + { + name: "failed on invalid routing config: primary is missing", + version: "2", + percent: 100, + primary: nil, + secondary: nil, + out: false, + }, + { + name: "configure successfully in case only primary provided", + version: "2", + percent: 100, + primary: &provider.VersionTraffic{ + Version: "1", + Percent: 100, + }, + secondary: nil, + out: true, + }, + { + name: "configure successfully in case set new primary lower than 100 percent", + version: "2", + percent: 70, + primary: &provider.VersionTraffic{ + Version: "1", + Percent: 100, + }, + secondary: nil, + out: true, + }, + { + name: "configure successfully in case set new primary lower than 100 percent and currently 2 versions is set", + version: "3", + percent: 70, + primary: &provider.VersionTraffic{ + Version: "2", + Percent: 50, + }, + secondary: &provider.VersionTraffic{ + Version: "1", + Percent: 50, + }, + out: true, + }, + { + name: "configure successfully in case set new primary to 100 percent and currently 2 versions is set", + version: "3", + percent: 100, + primary: &provider.VersionTraffic{ + Version: "2", + Percent: 50, + }, + secondary: &provider.VersionTraffic{ + Version: "1", + Percent: 50, + }, + out: true, + }, + { + name: "configure successfully in case new primary is the same as current primary", + version: "2", + percent: 100, + primary: &provider.VersionTraffic{ + Version: "2", + Percent: 50, + }, + secondary: &provider.VersionTraffic{ + Version: "1", + Percent: 50, + }, + out: true, + }, + { + 
name: "configure successfully in case new primary is the same as current secondary", + version: "2", + percent: 100, + primary: &provider.VersionTraffic{ + Version: "1", + Percent: 50, + }, + secondary: &provider.VersionTraffic{ + Version: "2", + Percent: 50, + }, + out: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + trafficCfg := make(map[provider.TrafficConfigKeyName]provider.VersionTraffic) + if tc.primary != nil { + trafficCfg[provider.TrafficPrimaryVersionKeyName] = *tc.primary + } + if tc.secondary != nil { + trafficCfg[provider.TrafficSecondaryVersionKeyName] = *tc.secondary + } + ok := configureTrafficRouting(trafficCfg, tc.version, tc.percent) + assert.Equal(t, tc.out, ok) + if primary, ok := trafficCfg[provider.TrafficPrimaryVersionKeyName]; ok { + assert.Equal(t, tc.version, primary.Version) + assert.Equal(t, float64(tc.percent), primary.Percent) + if secondary, ok := trafficCfg[provider.TrafficSecondaryVersionKeyName]; ok { + assert.Equal(t, float64(100-tc.percent), secondary.Percent) + } + } + }) + } +} + +type fakeRepo struct { + git.Repo + source string +} + +func (m *fakeRepo) GetPath() string { + return m.source +} + +func (m *fakeRepo) Checkout(_ context.Context, _ string) error { + return nil +} + +func (m *fakeRepo) Clean() error { + return nil +} + +type fakeGitClient struct { + repo git.Repo +} + +func (g *fakeGitClient) Clone(_ context.Context, _, _, _, _ string) (git.Repo, error) { + return g.repo, nil +} + +func TestPrepareZipFromSource(t *testing.T) { + t.Parallel() + + gc := &fakeGitClient{ + repo: &fakeRepo{ + source: "testdata/raw", + }, + } + fm := provider.FunctionManifest{} + r, err := prepareZipFromSource(context.Background(), gc, fm) + require.Nil(t, err) + + data, err := io.ReadAll(r) + assert.Nil(t, err) + assert.NotEqual(t, 0, len(data)) +} diff --git a/pkg/app/pipedv1/executor/lambda/rollback.go b/pkg/app/pipedv1/executor/lambda/rollback.go new file mode 100644 index 
0000000000..84e44f467a --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/rollback.go @@ -0,0 +1,163 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda + +import ( + "context" + "fmt" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/lambda" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageRollback: + status = e.ensureRollback(ctx) + default: + e.LogPersister.Errorf("Unsupported stage %s for lambda application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // Not rollback in case this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + runningDS, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + appCfg := runningDS.ApplicationConfig.LambdaApplicationSpec + if appCfg == nil { + e.LogPersister.Errorf("Malformed application configuration: missing LambdaApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + platformProviderName, platformProviderCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + fm, ok := loadFunctionManifest(&e.Input, appCfg.Input.FunctionManifestFile, runningDS) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + if !rollback(ctx, &e.Input, platformProviderName, platformProviderCfg, fm) { + return model.StageStatus_STAGE_FAILURE + } + + return model.StageStatus_STAGE_SUCCESS +} + +func rollback(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderLambdaConfig, fm provider.FunctionManifest) bool { + in.LogPersister.Infof("Start rollback the lambda function: %s to original stage", fm.Spec.Name) + client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger) + if err != nil { + in.LogPersister.Errorf("Unable to create Lambda client for the provider %s: %v", platformProviderName, err) + return false + } + + // Rollback Lambda application configuration to previous state. + if err := client.UpdateFunction(ctx, fm); err != nil { + in.LogPersister.Errorf("Unable to rollback Lambda function %s configuration to previous stage: %v", fm.Spec.Name, err) + return false + } + in.LogPersister.Infof("Rolled back the lambda function %s configuration to original stage", fm.Spec.Name) + + // Rollback traffic routing to previous state. + // Restore original traffic config from metadata store. 
+ originalTrafficKeyName := fmt.Sprintf("original-traffic-%s", in.Deployment.RunningCommitHash) + originalTrafficCfgData, ok := in.MetadataStore.Shared().Get(originalTrafficKeyName) + if !ok { + in.LogPersister.Errorf("Unable to prepare original traffic config to rollback Lambda function %s. No traffic changes have been committed yet.", fm.Spec.Name) + return false + } + + originalTrafficCfg := provider.RoutingTrafficConfig{} + if err := originalTrafficCfg.Decode([]byte(originalTrafficCfgData)); err != nil { + in.LogPersister.Errorf("Unable to prepare original traffic config to rollback Lambda function %s: %v", fm.Spec.Name, err) + return false + } + + // Restore promoted traffic config from metadata store. + promotedTrafficKeyName := fmt.Sprintf("latest-promote-traffic-%s", in.Deployment.RunningCommitHash) + promotedTrafficCfgData, ok := in.MetadataStore.Shared().Get(promotedTrafficKeyName) + // If there is no previous promoted traffic config, which mean no promote run previously so no need to do anything to rollback. + if !ok { + in.LogPersister.Info("It seems the traffic has not been changed during the deployment process. No need to rollback the traffic config.") + return true + } + + promotedTrafficCfg := provider.RoutingTrafficConfig{} + if err := promotedTrafficCfg.Decode([]byte(promotedTrafficCfgData)); err != nil { + in.LogPersister.Errorf("Unable to prepare promoted traffic config to rollback Lambda function %s: %v", fm.Spec.Name, err) + return false + } + + switch len(originalTrafficCfg) { + // Original traffic config has both PRIMARY and SECONDARY version config. + case 2: + if err = client.UpdateTrafficConfig(ctx, fm, originalTrafficCfg); err != nil { + in.LogPersister.Errorf("Failed to rollback original traffic config for Lambda function %s: %v", fm.Spec.Name, err) + return false + } + return true + // Original traffic config is PRIMARY ONLY config, + // we need to reset any others SECONDARY created by previous (until failed) PROMOTE stages. 
+ case 1: + // Validate stored original traffic config, since it PRIMARY ONLY, the percent must be float64(100) + primary, ok := originalTrafficCfg[provider.TrafficPrimaryVersionKeyName] + if !ok || primary.Percent != float64(100) { + in.LogPersister.Errorf("Unable to prepare original traffic config: invalid original traffic config stored") + return false + } + + // Update promoted traffic config by add 0% SECONDARY for reset remote promoted version config. + if !configureTrafficRouting(promotedTrafficCfg, primary.Version, 100) { + in.LogPersister.Errorf("Unable to prepare traffic config to rollback Lambda function %s: can not reset promoted version", fm.Spec.Name) + return false + } + + if err = client.UpdateTrafficConfig(ctx, fm, promotedTrafficCfg); err != nil { + in.LogPersister.Errorf("Failed to rollback original traffic config for Lambda function %s: %v", fm.Spec.Name, err) + return false + } + return true + default: + in.LogPersister.Errorf("Unable to prepare original traffic config: invalid original traffic config stored") + return false + } +} diff --git a/pkg/app/pipedv1/executor/lambda/testdata/raw/test-1/text.txt b/pkg/app/pipedv1/executor/lambda/testdata/raw/test-1/text.txt new file mode 100644 index 0000000000..8e2baefbdb --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/testdata/raw/test-1/text.txt @@ -0,0 +1 @@ +test-1 diff --git a/pkg/app/pipedv1/executor/lambda/testdata/raw/test-2/.dotfile b/pkg/app/pipedv1/executor/lambda/testdata/raw/test-2/.dotfile new file mode 100644 index 0000000000..38c98d9815 --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/testdata/raw/test-2/.dotfile @@ -0,0 +1 @@ +test-2 diff --git a/pkg/app/pipedv1/executor/lambda/testdata/raw/text.txt b/pkg/app/pipedv1/executor/lambda/testdata/raw/text.txt new file mode 100644 index 0000000000..9daeafb986 --- /dev/null +++ b/pkg/app/pipedv1/executor/lambda/testdata/raw/text.txt @@ -0,0 +1 @@ +test diff --git a/pkg/app/pipedv1/executor/registry/registry.go 
b/pkg/app/pipedv1/executor/registry/registry.go index dd4c3afbf1..d7c2728175 100644 --- a/pkg/app/pipedv1/executor/registry/registry.go +++ b/pkg/app/pipedv1/executor/registry/registry.go @@ -19,6 +19,16 @@ import ( "sync" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/analysis" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/customsync" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/ecs" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/lambda" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/scriptrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/terraform" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/wait" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/waitapproval" "github.com/pipe-cd/pipecd/pkg/model" ) @@ -91,3 +101,17 @@ var defaultRegistry = ®istry{ func DefaultRegistry() Registry { return defaultRegistry } + +// init registers all built-in executors to the default registry. +func init() { + analysis.Register(defaultRegistry) + cloudrun.Register(defaultRegistry) + kubernetes.Register(defaultRegistry) + lambda.Register(defaultRegistry) + terraform.Register(defaultRegistry) + ecs.Register(defaultRegistry) + wait.Register(defaultRegistry) + waitapproval.Register(defaultRegistry) + customsync.Register(defaultRegistry) + scriptrun.Register(defaultRegistry) +} diff --git a/pkg/app/pipedv1/executor/scriptrun/scriptrun.go b/pkg/app/pipedv1/executor/scriptrun/scriptrun.go new file mode 100644 index 0000000000..e37ccbd50b --- /dev/null +++ b/pkg/app/pipedv1/executor/scriptrun/scriptrun.go @@ -0,0 +1,135 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package scriptrun + +import ( + "os" + "os/exec" + "strings" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +type Executor struct { + executor.Input + + appDir string +} + +func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus { + e.LogPersister.Infof("Start executing the script run stage") + + opts := e.Input.StageConfig.ScriptRunStageOptions + if opts == nil { + e.LogPersister.Error("option for script run stage not found") + return model.StageStatus_STAGE_FAILURE + } + + if opts.Run == "" { + return model.StageStatus_STAGE_SUCCESS + } + + var originalStatus = e.Stage.Status + ds, err := e.TargetDSP.Get(sig.Context(), e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + e.appDir = ds.AppDir + + timeout := e.StageConfig.ScriptRunStageOptions.Timeout.Duration() + + c := make(chan model.StageStatus, 1) + go func() { + c <- e.executeCommand() + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case result := <-c: + return result + case <-timer.C: + e.LogPersister.Errorf("Canceled because of timeout") + return model.StageStatus_STAGE_FAILURE + + case s := <-sig.Ch(): + switch s { + case executor.StopSignalCancel: + e.LogPersister.Info("Canceled by user") + return 
model.StageStatus_STAGE_CANCELLED + case executor.StopSignalTerminate: + e.LogPersister.Info("Terminated by system") + return originalStatus + default: + e.LogPersister.Error("Unexpected") + return model.StageStatus_STAGE_FAILURE + } + } + } +} + +func (e *Executor) executeCommand() model.StageStatus { + opts := e.StageConfig.ScriptRunStageOptions + + e.LogPersister.Infof("Running commands...") + for _, v := range strings.Split(opts.Run, "\n") { + if v != "" { + e.LogPersister.Infof(" %s", v) + } + } + + envs := make([]string, 0, len(opts.Env)) + for key, value := range opts.Env { + envs = append(envs, key+"="+value) + } + + cmd := exec.Command("/bin/sh", "-l", "-c", opts.Run) + cmd.Dir = e.appDir + cmd.Env = append(os.Environ(), envs...) + cmd.Stdout = e.LogPersister + cmd.Stderr = e.LogPersister + if err := cmd.Run(); err != nil { + e.LogPersister.Errorf("failed to exec command: %v", err) + return model.StageStatus_STAGE_FAILURE + } + return model.StageStatus_STAGE_SUCCESS +} + +type RollbackExecutor struct { + executor.Input +} + +func (e *RollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + e.LogPersister.Infof("Unimplemented: rolling back the script run stage") + return model.StageStatus_STAGE_FAILURE +} + +// Register registers this executor factory into a given registerer. +func Register(r registerer) { + r.Register(model.StageScriptRun, func(in executor.Input) executor.Executor { + return &Executor{ + Input: in, + } + }) +} diff --git a/pkg/app/pipedv1/executor/terraform/deploy.go b/pkg/app/pipedv1/executor/terraform/deploy.go new file mode 100644 index 0000000000..0f3589b6f5 --- /dev/null +++ b/pkg/app/pipedv1/executor/terraform/deploy.go @@ -0,0 +1,219 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terraform + +import ( + "context" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type deployExecutor struct { + executor.Input + + repoDir string + appDir string + vars []string + terraformPath string + appCfg *config.TerraformApplicationSpec +} + +func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus { + providerCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + ctx := sig.Context() + ds, err := e.TargetDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.appCfg = ds.ApplicationConfig.TerraformApplicationSpec + if e.appCfg == nil { + e.LogPersister.Error("Malformed application configuration: missing TerraformApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + e.repoDir = ds.RepoDir + e.appDir = ds.AppDir + + e.vars = make([]string, 0, len(providerCfg.Vars)+len(e.appCfg.Input.Vars)) + e.vars = append(e.vars, providerCfg.Vars...) + e.vars = append(e.vars, e.appCfg.Input.Vars...) 
+ + var ( + originalStatus = e.Stage.Status + status model.StageStatus + ) + + var ok bool + e.terraformPath, ok = findTerraform(ctx, e.appCfg.Input.TerraformVersion, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + switch model.Stage(e.Stage.Name) { + case model.StageTerraformSync: + status = e.ensureSync(ctx) + + case model.StageTerraformPlan: + status = e.ensurePlan(ctx) + + case model.StageTerraformApply: + status = e.ensureApply(ctx) + + default: + e.LogPersister.Errorf("Unsupported stage %s for terraform application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *deployExecutor) ensureSync(ctx context.Context) model.StageStatus { + var ( + flags = e.appCfg.Input.CommandFlags + envs = e.appCfg.Input.CommandEnvs + cmd = provider.NewTerraform( + e.terraformPath, + e.appDir, + provider.WithVars(e.vars), + provider.WithVarFiles(e.appCfg.Input.VarFiles), + provider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply), + provider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply), + ) + ) + + if ok := showUsingVersion(ctx, cmd, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Init(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to init (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if ok := selectWorkspace(ctx, cmd, e.appCfg.Input.Workspace, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + planResult, err := cmd.Plan(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to plan (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if planResult.NoChanges() { + e.LogPersister.Info("No changes to apply") + return model.StageStatus_STAGE_SUCCESS + } + + e.LogPersister.Infof("Detected %d import, %d add, %d change, %d destroy. 
Those changes will be applied automatically.", planResult.Imports, planResult.Adds, planResult.Changes, planResult.Destroys) + + if err := cmd.Apply(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to apply changes (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.LogPersister.Success("Successfully applied changes") + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensurePlan(ctx context.Context) model.StageStatus { + var ( + flags = e.appCfg.Input.CommandFlags + envs = e.appCfg.Input.CommandEnvs + cmd = provider.NewTerraform( + e.terraformPath, + e.appDir, + provider.WithVars(e.vars), + provider.WithVarFiles(e.appCfg.Input.VarFiles), + provider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply), + provider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply), + ) + ) + + if ok := showUsingVersion(ctx, cmd, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Init(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to init (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if ok := selectWorkspace(ctx, cmd, e.appCfg.Input.Workspace, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + planResult, err := cmd.Plan(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to plan (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if planResult.NoChanges() { + e.LogPersister.Success("No changes to apply") + if e.StageConfig.TerraformPlanStageOptions.ExitOnNoChanges { + return model.StageStatus_STAGE_EXITED + } + return model.StageStatus_STAGE_SUCCESS + } + + e.LogPersister.Successf("Detected %d import, %d add, %d change, %d destroy.", planResult.Imports, planResult.Adds, planResult.Changes, planResult.Destroys) + return model.StageStatus_STAGE_SUCCESS +} + +func (e *deployExecutor) ensureApply(ctx context.Context) model.StageStatus { + var ( + flags = e.appCfg.Input.CommandFlags + envs 
= e.appCfg.Input.CommandEnvs + cmd = provider.NewTerraform( + e.terraformPath, + e.appDir, + provider.WithVars(e.vars), + provider.WithVarFiles(e.appCfg.Input.VarFiles), + provider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply), + provider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply), + ) + ) + + if ok := showUsingVersion(ctx, cmd, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Init(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to init (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if ok := selectWorkspace(ctx, cmd, e.appCfg.Input.Workspace, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Apply(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to apply changes (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.LogPersister.Success("Successfully applied changes") + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/terraform/rollback.go b/pkg/app/pipedv1/executor/terraform/rollback.go new file mode 100644 index 0000000000..3be35435dd --- /dev/null +++ b/pkg/app/pipedv1/executor/terraform/rollback.go @@ -0,0 +1,115 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package terraform + +import ( + "context" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type rollbackExecutor struct { + executor.Input +} + +func (e *rollbackExecutor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + ctx = sig.Context() + originalStatus = e.Stage.Status + status model.StageStatus + ) + + switch model.Stage(e.Stage.Name) { + case model.StageRollback: + status = e.ensureRollback(ctx) + + default: + e.LogPersister.Errorf("Unsupported stage %s for terraform application", e.Stage.Name) + return model.StageStatus_STAGE_FAILURE + } + + return executor.DetermineStageStatus(sig.Signal(), originalStatus, status) +} + +func (e *rollbackExecutor) ensureRollback(ctx context.Context) model.StageStatus { + // There is nothing to do if this is the first deployment. + if e.Deployment.RunningCommitHash == "" { + e.LogPersister.Errorf("Unable to determine the last deployed commit to rollback. It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + providerCfg, found := findPlatformProvider(&e.Input) + if !found { + return model.StageStatus_STAGE_FAILURE + } + + ds, err := e.RunningDSP.Get(ctx, e.LogPersister) + if err != nil { + e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + appCfg := ds.ApplicationConfig.TerraformApplicationSpec + if appCfg == nil { + e.LogPersister.Error("Malformed application configuration: missing TerraformApplicationSpec") + return model.StageStatus_STAGE_FAILURE + } + + terraformPath, ok := findTerraform(ctx, appCfg.Input.TerraformVersion, e.LogPersister) + if !ok { + return model.StageStatus_STAGE_FAILURE + } + + vars := make([]string, 0, len(providerCfg.Vars)+len(appCfg.Input.Vars)) + vars = append(vars, providerCfg.Vars...) + vars = append(vars, appCfg.Input.Vars...) 
+ + e.LogPersister.Infof("Start rolling back to the state defined at commit %s", e.Deployment.RunningCommitHash) + var ( + flags = appCfg.Input.CommandFlags + envs = appCfg.Input.CommandEnvs + cmd = provider.NewTerraform( + terraformPath, + ds.AppDir, + provider.WithVars(vars), + provider.WithVarFiles(appCfg.Input.VarFiles), + provider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply), + provider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply), + ) + ) + + if ok := showUsingVersion(ctx, cmd, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Init(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to init (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if ok := selectWorkspace(ctx, cmd, appCfg.Input.Workspace, e.LogPersister); !ok { + return model.StageStatus_STAGE_FAILURE + } + + if err := cmd.Apply(ctx, e.LogPersister); err != nil { + e.LogPersister.Errorf("Failed to apply changes (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + e.LogPersister.Success("Successfully rolled back the changes") + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/executor/terraform/terraform.go b/pkg/app/pipedv1/executor/terraform/terraform.go new file mode 100644 index 0000000000..a72ffc5b32 --- /dev/null +++ b/pkg/app/pipedv1/executor/terraform/terraform.go @@ -0,0 +1,99 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package terraform + +import ( + "context" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error + RegisterRollback(kind model.RollbackKind, f executor.Factory) error +} + +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &deployExecutor{ + Input: in, + } + } + r.Register(model.StageTerraformSync, f) + r.Register(model.StageTerraformPlan, f) + r.Register(model.StageTerraformApply, f) + + r.RegisterRollback(model.RollbackKind_Rollback_TERRAFORM, func(in executor.Input) executor.Executor { + return &rollbackExecutor{ + Input: in, + } + }) +} + +func showUsingVersion(ctx context.Context, cmd *provider.Terraform, lp executor.LogPersister) bool { + version, err := cmd.Version(ctx) + if err != nil { + lp.Errorf("Failed to check terraform version (%v)", err) + return false + } + lp.Infof("Using terraform version %q to execute the terraform commands", version) + return true +} + +func selectWorkspace(ctx context.Context, cmd *provider.Terraform, workspace string, lp executor.LogPersister) bool { + if workspace == "" { + return true + } + if err := cmd.SelectWorkspace(ctx, workspace); err != nil { + lp.Errorf("Failed to select workspace %q (%v). 
You might need to create the workspace before using by command %q", workspace, err, "terraform workspace new "+workspace) + return false + } + lp.Infof("Selected workspace %q", workspace) + return true +} + +func findTerraform(ctx context.Context, version string, lp executor.LogPersister) (string, bool) { + path, installed, err := toolregistry.DefaultRegistry().Terraform(ctx, version) + if err != nil { + lp.Errorf("Unable to find required terraform %q (%v)", version, err) + return "", false + } + if installed { + lp.Infof("Terraform %q has just been installed to %q because of no pre-installed binary for that version", version, path) + } + return path, true +} + +func findPlatformProvider(in *executor.Input) (cfg *config.PlatformProviderTerraformConfig, found bool) { + var name = in.Application.PlatformProvider + if name == "" { + in.LogPersister.Error("Missing the PlatformProvider name in the application configuration") + return + } + + cp, ok := in.PipedConfig.FindPlatformProvider(name, model.ApplicationKind_TERRAFORM) + if !ok { + in.LogPersister.Errorf("The specified platform provider %q was not found in piped configuration", name) + return + } + + cfg = cp.TerraformConfig + found = true + return +} diff --git a/pkg/app/pipedv1/executor/wait/wait.go b/pkg/app/pipedv1/executor/wait/wait.go new file mode 100644 index 0000000000..67682a5b90 --- /dev/null +++ b/pkg/app/pipedv1/executor/wait/wait.go @@ -0,0 +1,127 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package wait + +import ( + "context" + "strconv" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + defaultDuration = time.Minute + logInterval = 10 * time.Second + startTimeKey = "startTime" +) + +type Executor struct { + executor.Input +} + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error +} + +// Register registers this executor factory into a given registerer. +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &Executor{ + Input: in, + } + } + r.Register(model.StageWait, f) +} + +// Execute starts waiting for the specified duration. +func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + originalStatus = e.Stage.Status + duration = defaultDuration + ) + + // Apply the stage configurations. + if opts := e.StageConfig.WaitStageOptions; opts != nil { + if opts.Duration > 0 { + duration = opts.Duration.Duration() + } + } + totalDuration := duration + + // Retrieve the saved startTime from the previous run. 
+ startTime := e.retrieveStartTime() + if !startTime.IsZero() { + duration -= time.Since(startTime) + if duration < 0 { + duration = 0 + } + } else { + startTime = time.Now() + } + defer e.saveStartTime(sig.Context(), startTime) + + timer := time.NewTimer(duration) + defer timer.Stop() + + ticker := time.NewTicker(logInterval) + defer ticker.Stop() + + e.LogPersister.Infof("Waiting for %v...", duration) + for { + select { + case <-timer.C: + e.LogPersister.Infof("Waited for %v", totalDuration) + return model.StageStatus_STAGE_SUCCESS + + case <-ticker.C: + e.LogPersister.Infof("%v elapsed...", time.Since(startTime)) + + case s := <-sig.Ch(): + switch s { + case executor.StopSignalCancel: + return model.StageStatus_STAGE_CANCELLED + case executor.StopSignalTerminate: + return originalStatus + default: + return model.StageStatus_STAGE_FAILURE + } + } + } +} + +func (e *Executor) retrieveStartTime() (t time.Time) { + s, ok := e.MetadataStore.Stage(e.Stage.Id).Get(startTimeKey) + if !ok { + return + } + ut, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return + } + return time.Unix(ut, 0) +} + +func (e *Executor) saveStartTime(ctx context.Context, t time.Time) { + metadata := map[string]string{ + startTimeKey: strconv.FormatInt(t.Unix(), 10), + } + if err := e.MetadataStore.Stage(e.Stage.Id).PutMulti(ctx, metadata); err != nil { + e.Logger.Error("failed to store metadata", zap.Error(err)) + } +} diff --git a/pkg/app/pipedv1/executor/waitapproval/waitapproval.go b/pkg/app/pipedv1/executor/waitapproval/waitapproval.go new file mode 100644 index 0000000000..af4f031caf --- /dev/null +++ b/pkg/app/pipedv1/executor/waitapproval/waitapproval.go @@ -0,0 +1,200 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package waitapproval + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + approvedByKey = "ApprovedBy" +) + +type Executor struct { + executor.Input +} + +type registerer interface { + Register(stage model.Stage, f executor.Factory) error +} + +// Register registers this executor factory into a given registerer. +func Register(r registerer) { + f := func(in executor.Input) executor.Executor { + return &Executor{ + Input: in, + } + } + r.Register(model.StageWaitApproval, f) +} + +// Execute starts waiting until an approval from one of the specified users. 
+func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus { + var ( + originalStatus = e.Stage.Status + ctx = sig.Context() + ticker = time.NewTicker(5 * time.Second) + ) + defer ticker.Stop() + timeout := e.StageConfig.WaitApprovalStageOptions.Timeout.Duration() + timer := time.NewTimer(timeout) + + e.reportRequiringApproval() + + num := e.StageConfig.WaitApprovalStageOptions.MinApproverNum + e.LogPersister.Infof("Waiting for approval from at least %d user(s)...", num) + for { + select { + case <-ticker.C: + if e.checkApproval(ctx, num) { + return model.StageStatus_STAGE_SUCCESS + } + + case s := <-sig.Ch(): + switch s { + case executor.StopSignalCancel: + return model.StageStatus_STAGE_CANCELLED + case executor.StopSignalTerminate: + return originalStatus + default: + return model.StageStatus_STAGE_FAILURE + } + case <-timer.C: + e.LogPersister.Errorf("Timed out %v", timeout) + return model.StageStatus_STAGE_FAILURE + } + } +} + +func (e *Executor) checkApproval(ctx context.Context, num int) bool { + var approveCmd *model.ReportableCommand + commands := e.CommandLister.ListCommands() + + for i, cmd := range commands { + if cmd.GetApproveStage() != nil { + approveCmd = &commands[i] + break + } + } + if approveCmd == nil { + return false + } + + reached := e.validateApproverNum(ctx, approveCmd.Commander, num) + if err := approveCmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, nil); err != nil { + e.Logger.Error("failed to report handled command", zap.Error(err)) + } + return reached +} + +func (e *Executor) reportApproved(approver string) { + users, groups, err := e.getApplicationNotificationMentions(model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL) + if err != nil { + e.Logger.Error("failed to get the list of users or groups", zap.Error(err)) + } + + e.Notifier.Notify(model.NotificationEvent{ + Type: model.NotificationEventType_EVENT_DEPLOYMENT_APPROVED, + Metadata: &model.NotificationEventDeploymentApproved{ + Deployment: 
e.Deployment, + Approver: approver, + MentionedAccounts: users, + MentionedGroups: groups, + }, + }) +} + +func (e *Executor) reportRequiringApproval() { + users, groups, err := e.getApplicationNotificationMentions(model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL) + if err != nil { + e.Logger.Error("failed to get the list of users or groups", zap.Error(err)) + } + + e.Notifier.Notify(model.NotificationEvent{ + Type: model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL, + Metadata: &model.NotificationEventDeploymentWaitApproval{ + Deployment: e.Deployment, + MentionedAccounts: users, + MentionedGroups: groups, + }, + }) +} + +// getMentionedUsers returns the list of users groups who should be mentioned in the notification. +func (e *Executor) getApplicationNotificationMentions(event model.NotificationEventType) ([]string, []string, error) { + n, ok := e.MetadataStore.Shared().Get(model.MetadataKeyDeploymentNotification) + if !ok { + return []string{}, []string{}, nil + } + + var notification config.DeploymentNotification + if err := json.Unmarshal([]byte(n), ¬ification); err != nil { + return nil, nil, fmt.Errorf("could not extract mentions users and groups config: %w", err) + } + + return notification.FindSlackUsers(event), notification.FindSlackGroups(event), nil +} + +// validateApproverNum checks if number of approves is valid. 
+func (e *Executor) validateApproverNum(ctx context.Context, approver string, minApproverNum int) bool { + if minApproverNum == 1 { + if err := e.MetadataStore.Stage(e.Stage.Id).Put(ctx, approvedByKey, approver); err != nil { + e.LogPersister.Errorf("Unable to save approver information to deployment, %v", err) + } + e.LogPersister.Infof("Got approval from %q", approver) + e.reportApproved(approver) + e.LogPersister.Infof("This stage has been approved by %d user (%s)", minApproverNum, approver) + return true + } + + const delimiter = ", " + as, _ := e.MetadataStore.Stage(e.Stage.Id).Get(approvedByKey) + var approvedUsers []string + if as != "" { + approvedUsers = strings.Split(as, delimiter) + } + + for _, u := range approvedUsers { + if u == approver { + e.LogPersister.Infof("Approval from the same user (%s) will not be counted", approver) + return false + } + } + e.LogPersister.Infof("Got approval from %q", approver) + approvedUsers = append(approvedUsers, approver) + aus := strings.Join(approvedUsers, delimiter) + + if err := e.MetadataStore.Stage(e.Stage.Id).Put(ctx, approvedByKey, aus); err != nil { + e.LogPersister.Errorf("Unable to save approver information to deployment, %v", err) + } + if remain := minApproverNum - len(approvedUsers); remain > 0 { + e.LogPersister.Infof("Waiting for %d other approvers...", remain) + return false + } + e.reportApproved(aus) + e.LogPersister.Info("Received all needed approvals") + e.LogPersister.Infof("This stage has been approved by %d users (%s)", minApproverNum, aus) + return true +} diff --git a/pkg/app/pipedv1/executor/waitapproval/waitapproval_test.go b/pkg/app/pipedv1/executor/waitapproval/waitapproval_test.go new file mode 100644 index 0000000000..8c5a8c0cec --- /dev/null +++ b/pkg/app/pipedv1/executor/waitapproval/waitapproval_test.go @@ -0,0 +1,219 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package waitapproval + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type fakeLogPersister struct{} + +func (l *fakeLogPersister) Write(_ []byte) (int, error) { return 0, nil } +func (l *fakeLogPersister) Info(_ string) {} +func (l *fakeLogPersister) Infof(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Success(_ string) {} +func (l *fakeLogPersister) Successf(_ string, _ ...interface{}) {} +func (l *fakeLogPersister) Error(_ string) {} +func (l *fakeLogPersister) Errorf(_ string, _ ...interface{}) {} + +type metadata map[string]string + +type fakeAPIClient struct { + shared metadata + stages map[string]metadata +} + +func (c *fakeAPIClient) SaveDeploymentMetadata(_ context.Context, req *pipedservice.SaveDeploymentMetadataRequest, _ ...grpc.CallOption) (*pipedservice.SaveDeploymentMetadataResponse, error) { + md := make(map[string]string, len(c.shared)+len(req.Metadata)) + for k, v := range c.shared { + md[k] = v + } + for k, v := range req.Metadata { + md[k] = v + } + c.shared = md + return &pipedservice.SaveDeploymentMetadataResponse{}, nil +} + +func (c *fakeAPIClient) SaveStageMetadata(_ context.Context, req 
*pipedservice.SaveStageMetadataRequest, _ ...grpc.CallOption) (*pipedservice.SaveStageMetadataResponse, error) { + ori := c.stages[req.StageId] + md := make(map[string]string, len(ori)+len(req.Metadata)) + for k, v := range ori { + md[k] = v + } + for k, v := range req.Metadata { + md[k] = v + } + c.stages[req.StageId] = md + return &pipedservice.SaveStageMetadataResponse{}, nil +} + +type fakeNotifier struct{} + +func (n *fakeNotifier) Notify(_ model.NotificationEvent) {} + +func TestValidateApproverNum(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + ac := &fakeAPIClient{ + shared: make(map[string]string, 0), + stages: make(map[string]metadata, 0), + } + testcases := []struct { + name string + approver string + minApproverNum int + executor *Executor + want bool + }{ + { + name: "return the person who just approved", + approver: "user-1", + minApproverNum: 0, + executor: &Executor{ + Input: executor.Input{ + Stage: &model.PipelineStage{ + Id: "stage-1", + }, + LogPersister: &fakeLogPersister{}, + MetadataStore: metadatastore.NewMetadataStore(ac, &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-1", + Metadata: map[string]string{}, + }, + }, + }), + Notifier: &fakeNotifier{}, + }, + }, + want: true, + }, + { + name: "return an empty string because number of current approver is not enough", + approver: "user-1", + minApproverNum: 2, + executor: &Executor{ + Input: executor.Input{ + Stage: &model.PipelineStage{ + Id: "stage-1", + }, + LogPersister: &fakeLogPersister{}, + MetadataStore: metadatastore.NewMetadataStore(ac, &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-1", + Metadata: map[string]string{}, + }, + }, + }), + }, + }, + want: false, + }, + { + name: "return an empty string because current approver is same as an approver in metadata", + approver: "user-1", + minApproverNum: 2, + executor: &Executor{ + Input: executor.Input{ + Stage: &model.PipelineStage{ + Id: "stage-1", + }, + LogPersister: 
&fakeLogPersister{}, + MetadataStore: metadatastore.NewMetadataStore(ac, &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-1", + Metadata: map[string]string{ + approvedByKey: "user-1", + }, + }, + }, + }), + Notifier: &fakeNotifier{}, + }, + }, + want: false, + }, + { + name: "return an empty string because number of current approver and approvers in metadata is not enough", + approver: "user-2", + minApproverNum: 3, + executor: &Executor{ + Input: executor.Input{ + Stage: &model.PipelineStage{ + Id: "stage-1", + }, + LogPersister: &fakeLogPersister{}, + MetadataStore: metadatastore.NewMetadataStore(ac, &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-1", + Metadata: map[string]string{ + approvedByKey: "user-1", + }, + }, + }, + }), + Notifier: &fakeNotifier{}, + }, + }, + want: false, + }, + { + name: "return all approvers", + approver: "user-2", + minApproverNum: 2, + executor: &Executor{ + Input: executor.Input{ + Stage: &model.PipelineStage{ + Id: "stage-1", + }, + LogPersister: &fakeLogPersister{}, + MetadataStore: metadatastore.NewMetadataStore(ac, &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-1", + Metadata: map[string]string{ + approvedByKey: "user-1", + }, + }, + }, + }), + Notifier: &fakeNotifier{}, + }, + }, + want: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got := tc.executor.validateApproverNum(ctx, tc.approver, tc.minApproverNum) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/livestatereporter/cloudrun/report.go b/pkg/app/pipedv1/livestatereporter/cloudrun/report.go new file mode 100644 index 0000000000..e2fbf7995a --- /dev/null +++ b/pkg/app/pipedv1/livestatereporter/cloudrun/report.go @@ -0,0 +1,133 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type apiClient interface { + ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateResponse, error) + ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) +} + +type Reporter interface { + Run(ctx context.Context) error + ProviderName() string +} + +type reporter struct { + provider config.PipedPlatformProvider + appLister applicationLister + stateGetter cloudrun.Getter + apiClient apiClient + snapshotFlushInterval time.Duration + logger *zap.Logger + + snapshotVersions map[string]model.ApplicationLiveStateVersion +} + +func NewReporter(cp config.PipedPlatformProvider, appLister applicationLister, stateGetter cloudrun.Getter, apiClient apiClient, logger *zap.Logger) Reporter { + logger = logger.Named("cloudrun-reporter").With( + zap.String("platform-provider", cp.Name), + ) + return &reporter{ + provider: cp, + appLister: appLister, + stateGetter: stateGetter, + 
apiClient: apiClient, + snapshotFlushInterval: time.Minute, + logger: logger, + snapshotVersions: make(map[string]model.ApplicationLiveStateVersion), + } +} + +func (r *reporter) Run(ctx context.Context) error { + r.logger.Info("start running app live state reporter") + + r.logger.Info("waiting for livestatestore to be ready") + if err := r.stateGetter.WaitForReady(ctx, 10*time.Minute); err != nil { + r.logger.Error("livestatestore was unable to be ready in time", zap.Error(err)) + return err + } + + snapshotTicker := time.NewTicker(r.snapshotFlushInterval) + defer snapshotTicker.Stop() + + for { + select { + case <-snapshotTicker.C: + r.flushSnapshots(ctx) + + case <-ctx.Done(): + r.logger.Info("app live state reporter has been stopped") + return nil + } + } +} + +func (r *reporter) ProviderName() string { + return r.provider.Name +} + +func (r *reporter) flushSnapshots(ctx context.Context) { + apps := r.appLister.ListByPlatformProvider(r.provider.Name) + for _, app := range apps { + state, ok := r.stateGetter.GetState(app.Id) + if !ok { + r.logger.Info(fmt.Sprintf("no app state of cloudrun application %s to report", app.Id)) + continue + } + + snapshot := &model.ApplicationLiveStateSnapshot{ + ApplicationId: app.Id, + PipedId: app.PipedId, + ProjectId: app.ProjectId, + Kind: app.Kind, + Cloudrun: &model.CloudRunApplicationLiveState{ + Resources: state.Resources, + }, + Version: &state.Version, + } + snapshot.DetermineAppHealthStatus() + req := &pipedservice.ReportApplicationLiveStateRequest{ + Snapshot: snapshot, + } + + if _, err := r.apiClient.ReportApplicationLiveState(ctx, req); err != nil { + r.logger.Error("failed to report application live state", + zap.String("application-id", app.Id), + zap.Error(err), + ) + continue + } + r.snapshotVersions[app.Id] = state.Version + r.logger.Info(fmt.Sprintf("successfully reported application live state for application: %s", app.Id)) + } +} diff --git a/pkg/app/pipedv1/livestatereporter/kubernetes/reporter.go 
b/pkg/app/pipedv1/livestatereporter/kubernetes/reporter.go new file mode 100644 index 0000000000..65214a4da2 --- /dev/null +++ b/pkg/app/pipedv1/livestatereporter/kubernetes/reporter.go @@ -0,0 +1,184 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + maxNumEventsPerRequest = 1000 +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type apiClient interface { + ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateResponse, error) + ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) +} + +type Reporter interface { + Run(ctx context.Context) error + ProviderName() string +} + +type reporter struct { + provider config.PipedPlatformProvider + appLister applicationLister + stateGetter kubernetes.Getter + eventIterator kubernetes.EventIterator + apiClient apiClient + 
flushInterval time.Duration + snapshotFlushInterval time.Duration + logger *zap.Logger + + snapshotVersions map[string]model.ApplicationLiveStateVersion +} + +func NewReporter(cp config.PipedPlatformProvider, appLister applicationLister, stateGetter kubernetes.Getter, apiClient apiClient, logger *zap.Logger) Reporter { + logger = logger.Named("kubernetes-reporter").With( + zap.String("platform-provider", cp.Name), + ) + return &reporter{ + provider: cp, + appLister: appLister, + stateGetter: stateGetter, + eventIterator: stateGetter.NewEventIterator(), + apiClient: apiClient, + flushInterval: 5 * time.Second, + snapshotFlushInterval: 10 * time.Minute, + logger: logger, + snapshotVersions: make(map[string]model.ApplicationLiveStateVersion), + } +} + +func (r *reporter) Run(ctx context.Context) error { + r.logger.Info("start running app live state reporter") + + r.logger.Info("waiting for livestatestore to be ready") + if err := r.stateGetter.WaitForReady(ctx, 10*time.Minute); err != nil { + r.logger.Error("livestatestore was unable to be ready in time", zap.Error(err)) + return err + } + + // Do the first snapshot flushing after the statestore becomes ready. + r.flushSnapshots(ctx) + + snapshotTicker := time.NewTicker(r.snapshotFlushInterval) + defer snapshotTicker.Stop() + + ticker := time.NewTicker(r.flushInterval) + defer ticker.Stop() + + for { + select { + case <-snapshotTicker.C: + r.flushSnapshots(ctx) + + case <-ticker.C: + r.flushEvents(ctx) + + case <-ctx.Done(): + r.logger.Info("app live state reporter has been stopped") + return nil + } + } +} + +func (r *reporter) flushSnapshots(ctx context.Context) { + // TODO: In the future, maybe we should apply worker model for this or + // send multiple application states in one request. 
+ apps := r.appLister.ListByPlatformProvider(r.provider.Name) + for _, app := range apps { + state, ok := r.stateGetter.GetKubernetesAppLiveState(app.Id) + if !ok { + r.logger.Info(fmt.Sprintf("no app state of kubernetes application %s to report", app.Id)) + continue + } + + snapshot := &model.ApplicationLiveStateSnapshot{ + ApplicationId: app.Id, + PipedId: app.PipedId, + ProjectId: app.ProjectId, + Kind: app.Kind, + Kubernetes: &model.KubernetesApplicationLiveState{ + Resources: state.Resources, + }, + Version: &state.Version, + } + snapshot.DetermineAppHealthStatus() + req := &pipedservice.ReportApplicationLiveStateRequest{ + Snapshot: snapshot, + } + + if _, err := r.apiClient.ReportApplicationLiveState(ctx, req); err != nil { + r.logger.Error("failed to report application live state", + zap.String("application-id", app.Id), + zap.Error(err), + ) + continue + } + r.snapshotVersions[app.Id] = state.Version + r.logger.Info(fmt.Sprintf("successfully reported application live state for application: %s", app.Id)) + } +} + +func (r *reporter) flushEvents(ctx context.Context) error { + events := r.eventIterator.Next(maxNumEventsPerRequest) + if len(events) == 0 { + return nil + } + + filteredEvents := make([]*model.KubernetesResourceStateEvent, 0, len(events)) + for i, event := range events { + snapshotVersion, ok := r.snapshotVersions[event.ApplicationId] + if ok && event.SnapshotVersion.IsBefore(snapshotVersion) { + continue + } + filteredEvents = append(filteredEvents, &events[i]) + } + if len(filteredEvents) == 0 { + return nil + } + + req := &pipedservice.ReportApplicationLiveStateEventsRequest{ + KubernetesEvents: filteredEvents, + } + if _, err := r.apiClient.ReportApplicationLiveStateEvents(ctx, req); err != nil { + r.logger.Error("failed to report application live state events", + zap.Error(err), + ) + return err + } + + r.logger.Info(fmt.Sprintf("successfully reported %d events about application live state", len(filteredEvents))) + return nil +} + +func (r 
*reporter) ProviderName() string { + return r.provider.Name +} diff --git a/pkg/app/pipedv1/livestatereporter/reporter.go b/pkg/app/pipedv1/livestatereporter/reporter.go new file mode 100644 index 0000000000..d2c8aff3f8 --- /dev/null +++ b/pkg/app/pipedv1/livestatereporter/reporter.go @@ -0,0 +1,111 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package livestatereporter provides a piped component +// that reports the changes as well as full snapshot about live state of registered applications. 
+package livestatereporter + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatereporter/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatereporter/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + ListByPlatformProvider(name string) []*model.Application +} + +type apiClient interface { + ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateResponse, error) + ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) +} + +type Reporter interface { + Run(ctx context.Context) error +} + +type reporter struct { + reporters []providerReporter + logger *zap.Logger +} + +type providerReporter interface { + Run(ctx context.Context) error + ProviderName() string +} + +func NewReporter(appLister applicationLister, stateGetter livestatestore.Getter, apiClient apiClient, cfg *config.PipedSpec, logger *zap.Logger) Reporter { + r := &reporter{ + reporters: make([]providerReporter, 0, len(cfg.PlatformProviders)), + logger: logger.Named("live-state-reporter"), + } + + const errFmt = "unable to find live state getter for platform provider: %s" + for _, cp := range cfg.PlatformProviders { + switch cp.Type { + case model.PlatformProviderKubernetes: + sg, ok := stateGetter.KubernetesGetter(cp.Name) + if !ok { + r.logger.Error(fmt.Sprintf(errFmt, cp.Name)) + continue + } + r.reporters = append(r.reporters, kubernetes.NewReporter(cp, appLister, sg, apiClient, logger)) + case 
model.PlatformProviderCloudRun: + sg, ok := stateGetter.CloudRunGetter(cp.Name) + if !ok { + r.logger.Error(fmt.Sprintf(errFmt, cp.Name)) + continue + } + r.reporters = append(r.reporters, cloudrun.NewReporter(cp, appLister, sg, apiClient, logger)) + } + } + + return r +} + +func (r *reporter) Run(ctx context.Context) error { + group, ctx := errgroup.WithContext(ctx) + + for i, reporter := range r.reporters { + reporter := reporter + // Avoid starting all reporters at the same time to reduce the API call burst. + time.Sleep(time.Duration(i) * 10 * time.Second) + r.logger.Info(fmt.Sprintf("starting app live state reporter for cloud provider: %s", reporter.ProviderName())) + + group.Go(func() error { + return reporter.Run(ctx) + }) + } + + r.logger.Info(fmt.Sprintf("all live state reporters of %d providers have been started", len(r.reporters))) + + if err := group.Wait(); err != nil { + r.logger.Error("failed while running", zap.Error(err)) + return err + } + + r.logger.Info(fmt.Sprintf("all live state reporters of %d providers have been stopped", len(r.reporters))) + return nil +} diff --git a/pkg/app/pipedv1/livestatestore/cloudrun/cloudrun.go b/pkg/app/pipedv1/livestatestore/cloudrun/cloudrun.go new file mode 100644 index 0000000000..c56cd8a120 --- /dev/null +++ b/pkg/app/pipedv1/livestatestore/cloudrun/cloudrun.go @@ -0,0 +1,118 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "context" + "time" + + "go.uber.org/zap" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type Store struct { + store *store + logger *zap.Logger + interval time.Duration + firstSyncedCh chan error +} + +type Getter interface { + GetState(appID string) (State, bool) + GetServiceManifest(appID string) (provider.ServiceManifest, bool) + + WaitForReady(ctx context.Context, timeout time.Duration) error +} + +type State struct { + Resources []*model.CloudRunResourceState + Version model.ApplicationLiveStateVersion +} + +func NewStore(ctx context.Context, cfg *config.PlatformProviderCloudRunConfig, platformProvider string, logger *zap.Logger) (*Store, error) { + logger = logger.Named("cloudrun"). + With(zap.String("platform-provider", platformProvider)) + + client, err := provider.DefaultRegistry().Client(ctx, platformProvider, cfg, logger) + if err != nil { + return nil, err + } + + store := &Store{ + store: &store{ + client: client, + logger: logger.Named("store"), + }, + interval: 15 * time.Second, + logger: logger, + firstSyncedCh: make(chan error, 1), + } + + return store, nil +} + +func (s *Store) Run(ctx context.Context) error { + s.logger.Info("start running cloudrun app state store") + + tick := time.NewTicker(s.interval) + defer tick.Stop() + + // Run the first sync cloudrun services. 
+ if err := s.store.run(ctx); err != nil { + s.firstSyncedCh <- err + return err + } + + s.logger.Info("successfully the first synced all cloudrun services") + close(s.firstSyncedCh) + + for { + select { + case <-ctx.Done(): + s.logger.Info("cloudrun app state store has been stopped") + return nil + + case <-tick.C: + if err := s.store.run(ctx); err != nil { + s.logger.Error("failed to sync cloudrun services", zap.Error(err)) + continue + } + s.logger.Info("successfully synced all cloudrun services") + } + } +} + +func (s *Store) GetServiceManifest(appID string) (provider.ServiceManifest, bool) { + return s.store.getServiceManifest(appID) +} + +func (s *Store) GetState(appID string) (State, bool) { + return s.store.getState(appID) +} + +func (s *Store) WaitForReady(ctx context.Context, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + select { + case <-ctx.Done(): + return nil + case err := <-s.firstSyncedCh: + return err + } +} diff --git a/pkg/app/pipedv1/livestatestore/cloudrun/store.go b/pkg/app/pipedv1/livestatestore/cloudrun/store.go new file mode 100644 index 0000000000..277ac1dae0 --- /dev/null +++ b/pkg/app/pipedv1/livestatestore/cloudrun/store.go @@ -0,0 +1,170 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package cloudrun

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/atomic"
	"go.uber.org/zap"

	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// store holds the latest synced live state of all piped-managed
// Cloud Run services. The whole app map is replaced atomically on
// every successful sync, so readers never see a partial update.
type store struct {
	// apps holds a map[string]app keyed by PipeCD application ID.
	apps   atomic.Value
	logger *zap.Logger
	client provider.Client
}

type app struct {
	service provider.ServiceManifest
	// The states of service and all its active revisions which may handle the traffic.
	states []*model.CloudRunResourceState
	version model.ApplicationLiveStateVersion
}

// run performs one full sync: list all managed services, fetch their
// active revisions, then atomically swap in the rebuilt app map.
// Any fetch error aborts the sync and keeps the previous map intact.
func (s *store) run(ctx context.Context) error {
	svcs, err := s.fetchManagedServices(ctx)
	if err != nil {
		return fmt.Errorf("failed to fetch managed services: %w", err)
	}

	revs := make(map[string][]*provider.Revision, len(svcs))
	for _, svc := range svcs {
		id, ok := svc.UID()
		if !ok {
			// Without a UID the revisions cannot be keyed; skip.
			continue
		}
		names := svc.ActiveRevisionNames()
		rs, err := s.fetchActiveRevisions(ctx, names)
		if err != nil {
			return fmt.Errorf("failed to fetch active revisions: %w", err)
		}
		if len(rs) == 0 {
			continue
		}
		revs[id] = rs
	}

	// Update apps to the latest.
	apps := s.buildAppMap(svcs, revs)
	s.apps.Store(apps)

	return nil
}

// buildAppMap converts fetched services and revisions into the app map
// keyed by application ID. All entries of one sync share a single live
// state version stamped with the sync time.
func (s *store) buildAppMap(svcs []*provider.Service, revs map[string][]*provider.Revision) map[string]app {
	apps, now := make(map[string]app, len(svcs)), time.Now()
	version := model.ApplicationLiveStateVersion{
		Timestamp: now.Unix(),
	}
	for _, svc := range svcs {
		sm, err := svc.ServiceManifest()
		if err != nil {
			s.logger.Error("failed to load cloudrun service into service manifest", zap.Error(err))
			continue
		}

		appID, ok := sm.AppID()
		if !ok {
			// Service is not linked to a PipeCD application; ignore it.
			continue
		}

		// The error is intentionally ignored: services without a UID were
		// not given revisions above, so revs[id] is simply nil here.
		id, _ := svc.UID()
		apps[appID] = app{
			service: sm,
			states:  provider.MakeResourceStates(svc, revs[id], now),
			version: version,
		}
	}
	return apps
}

// fetchManagedServices pages through all Cloud Run services labeled as
// managed by piped, following the cursor until exhausted.
func (s *store) fetchManagedServices(ctx context.Context) ([]*provider.Service, error) {
	const maxLimit = 500
	var cursor string
	svcs := make([]*provider.Service, 0, maxLimit)
	for {
		ops := &provider.ListOptions{
			Limit:         maxLimit,
			LabelSelector: provider.MakeManagedByPipedSelector(),
			Cursor:        cursor,
		}
		// Cloud Run Admin API rate Limits.
		// https://cloud.google.com/run/quotas#api
		v, next, err := s.client.List(ctx, ops)
		if err != nil {
			return nil, err
		}
		svcs = append(svcs, v...)
		if next == "" {
			break
		}
		cursor = next
	}
	return svcs, nil
}

// fetchActiveRevisions fetches the revisions with the given names in
// one list call using a name selector.
func (s *store) fetchActiveRevisions(ctx context.Context, names []string) ([]*provider.Revision, error) {
	ops := &provider.ListRevisionsOptions{
		LabelSelector: provider.MakeRevisionNamesSelector(names),
	}
	v, _, err := s.client.ListRevisions(ctx, ops)
	return v, err
}

// loadApps returns the current app map, or nil before the first sync.
func (s *store) loadApps() map[string]app {
	apps := s.apps.Load()
	if apps == nil {
		return nil
	}
	return apps.(map[string]app)
}

// getServiceManifest returns the last synced manifest for the app ID.
func (s *store) getServiceManifest(appID string) (provider.ServiceManifest, bool) {
	apps := s.loadApps()
	if apps == nil {
		return provider.ServiceManifest{}, false
	}

	app, ok := apps[appID]
	if !ok {
		return provider.ServiceManifest{}, false
	}

	return app.service, true
}

// getState returns the last synced resource states and their version
// for the app ID.
func (s *store) getState(appID string) (State, bool) {
	apps := s.loadApps()
	if apps == nil {
		return State{}, false
	}

	app, ok := apps[appID]
	if !ok {
		return State{}, false
	}

	state := State{
		Resources: app.states,
		Version:   app.version,
	}
	return state, true
}
diff --git a/pkg/app/pipedv1/livestatestore/ecs/store.go b/pkg/app/pipedv1/livestatestore/ecs/store.go
new file mode 100644
index 0000000000..95a63ebdc7
--- /dev/null
+++ b/pkg/app/pipedv1/livestatestore/ecs/store.go
@@ -0,0 +1,51 @@
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package ecs + +import ( + "context" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + List() []*model.Application +} + +type Store struct { + logger *zap.Logger +} + +type Getter interface { +} + +func NewStore(cfg *config.PlatformProviderECSConfig, platformProvider string, appLister applicationLister, logger *zap.Logger) *Store { + logger = logger.Named("ecs"). + With(zap.String("platform-provider", platformProvider)) + + return &Store{ + logger: logger, + } +} + +func (s *Store) Run(ctx context.Context) error { + s.logger.Info("start running ecs app state store") + + s.logger.Info("ecs app state store has been stopped") + return nil +} diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/appnodes.go b/pkg/app/pipedv1/livestatestore/kubernetes/appnodes.go new file mode 100644 index 0000000000..341479244d --- /dev/null +++ b/pkg/app/pipedv1/livestatestore/kubernetes/appnodes.go @@ -0,0 +1,196 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "sync" + "time" + + "github.com/google/uuid" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type appNodes struct { + appID string + managingNodes map[string]node + dependedNodes map[string]node + version model.ApplicationLiveStateVersion + mu sync.RWMutex +} + +type node struct { + // The unique identifier of the resource generated by Kubernetes. + uid string + appID string + key provider.ResourceKey + unstructured *unstructured.Unstructured + state model.KubernetesResourceState +} + +func (n node) Manifest() provider.Manifest { + return provider.MakeManifest(n.key, n.unstructured) +} + +func (a *appNodes) addManagingResource(uid string, key provider.ResourceKey, obj *unstructured.Unstructured, now time.Time) (model.KubernetesResourceStateEvent, bool) { + // Some resources in Kubernetes (e.g. Deployment) are producing multiple keys + // for the same uid. So we use the configured original API version to ignore them. + originalAPIVersion := obj.GetAnnotations()[provider.LabelOriginalAPIVersion] + if originalAPIVersion != key.APIVersion { + return model.KubernetesResourceStateEvent{}, false + } + + n := node{ + uid: uid, + appID: a.appID, + key: key, + unstructured: obj, + state: provider.MakeKubernetesResourceState(uid, key, obj, now), + } + + a.mu.Lock() + oriNode, hasOriNode := a.managingNodes[uid] + version := a.version + a.managingNodes[uid] = n + a.updateVersion(now) + a.mu.Unlock() + + // No diff compared to previous state. 
+ if hasOriNode && !oriNode.state.HasDiff(n.state) { + return model.KubernetesResourceStateEvent{}, false + } + + return model.KubernetesResourceStateEvent{ + Id: uuid.New().String(), + ApplicationId: a.appID, + Type: model.KubernetesResourceStateEvent_ADD_OR_UPDATED, + State: &n.state, + SnapshotVersion: &version, + CreatedAt: now.Unix(), + }, true +} + +func (a *appNodes) deleteManagingResource(uid string, _ provider.ResourceKey, now time.Time) (model.KubernetesResourceStateEvent, bool) { + a.mu.Lock() + n, ok := a.managingNodes[uid] + if !ok { + a.mu.Unlock() + return model.KubernetesResourceStateEvent{}, false + } + + version := a.version + delete(a.managingNodes, uid) + a.updateVersion(now) + a.mu.Unlock() + + return model.KubernetesResourceStateEvent{ + Id: uuid.New().String(), + ApplicationId: a.appID, + Type: model.KubernetesResourceStateEvent_DELETED, + State: &n.state, + SnapshotVersion: &version, + CreatedAt: now.Unix(), + }, true +} + +func (a *appNodes) addDependedResource(uid string, key provider.ResourceKey, obj *unstructured.Unstructured, now time.Time) (model.KubernetesResourceStateEvent, bool) { + n := node{ + uid: uid, + appID: a.appID, + key: key, + unstructured: obj, + state: provider.MakeKubernetesResourceState(uid, key, obj, now), + } + + a.mu.Lock() + oriNode, hasOriNode := a.dependedNodes[uid] + version := a.version + a.dependedNodes[uid] = n + a.updateVersion(now) + a.mu.Unlock() + + // No diff compared to previous state. 
+ if hasOriNode && !oriNode.state.HasDiff(n.state) { + return model.KubernetesResourceStateEvent{}, false + } + + return model.KubernetesResourceStateEvent{ + Id: uuid.New().String(), + ApplicationId: a.appID, + Type: model.KubernetesResourceStateEvent_ADD_OR_UPDATED, + State: &n.state, + SnapshotVersion: &version, + CreatedAt: now.Unix(), + }, true +} + +func (a *appNodes) deleteDependedResource(uid string, _ provider.ResourceKey, now time.Time) (model.KubernetesResourceStateEvent, bool) { + a.mu.Lock() + n, ok := a.dependedNodes[uid] + if !ok { + a.mu.Unlock() + return model.KubernetesResourceStateEvent{}, false + } + + version := a.version + delete(a.dependedNodes, uid) + a.updateVersion(now) + a.mu.Unlock() + + return model.KubernetesResourceStateEvent{ + Id: uuid.New().String(), + ApplicationId: a.appID, + Type: model.KubernetesResourceStateEvent_DELETED, + State: &n.state, + SnapshotVersion: &version, + CreatedAt: now.Unix(), + }, true +} + +func (a *appNodes) getManagingNodes() map[string]node { + a.mu.RLock() + defer a.mu.RUnlock() + + return a.managingNodes +} + +func (a *appNodes) getNodes() (map[string]node, model.ApplicationLiveStateVersion) { + a.mu.RLock() + defer a.mu.RUnlock() + + var ( + version = a.version + nodes = make(map[string]node, len(a.managingNodes)+len(a.dependedNodes)) + ) + for k, n := range a.dependedNodes { + nodes[k] = n + } + for k, n := range a.managingNodes { + nodes[k] = n + } + return nodes, version +} + +func (a *appNodes) updateVersion(now time.Time) { + if a.version.Timestamp == now.Unix() { + a.version.Index++ + return + } + + a.version.Timestamp = now.Unix() + a.version.Index = 0 +} diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/kubernetes.go b/pkg/app/pipedv1/livestatestore/kubernetes/kubernetes.go new file mode 100644 index 0000000000..8279068dca --- /dev/null +++ b/pkg/app/pipedv1/livestatestore/kubernetes/kubernetes.go @@ -0,0 +1,152 @@ +// Copyright 2024 The PipeCD Authors. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"time"

	"go.uber.org/zap"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	// Imported to load the required auth plugins such as gcp, azure, oidc, openstack.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// Store watches one Kubernetes cluster through a reflector and keeps
// an in-memory live state of all watched resources, grouped per
// application by the inner store.
type Store struct {
	config      *config.PlatformProviderKubernetesConfig
	pipedConfig *config.PipedSpec
	// kubeConfig is built lazily inside Run from MasterURL/KubeConfigPath.
	kubeConfig *restclient.Config
	store      *store
	// watchingResourceKinds is populated by the reflector after its
	// first sync and is read via GetWatchingResourceKinds.
	watchingResourceKinds []provider.APIVersionKind
	// firstSyncedCh reports the result of the first full sync: an error
	// is sent on failure; the channel is closed on success.
	firstSyncedCh chan error
	logger        *zap.Logger
}

// Getter is the read-only view of the Kubernetes live state store.
type Getter interface {
	GetKubernetesAppLiveState(appID string) (AppState, bool)
	NewEventIterator() EventIterator

	GetWatchingResourceKinds() []provider.APIVersionKind
	GetAppLiveManifests(appID string) []provider.Manifest

	WaitForReady(ctx context.Context, timeout time.Duration) error
}

// AppState holds the resource states of one application together with
// the version they were observed at.
type AppState struct {
	Resources []*model.KubernetesResourceState
	Version   model.ApplicationLiveStateVersion
}

// EventIterator is a cursor over the stream of resource state events;
// each iterator (identified by id) tracks its own read position.
type EventIterator struct {
	id    int
	store *store
}

// Next returns up to maxNum events that occurred since the previous
// call, advancing this iterator's position.
func (it EventIterator) Next(maxNum int) []model.KubernetesResourceStateEvent {
	return it.store.nextEvents(it.id, maxNum)
}

// NewStore creates a Store for one Kubernetes platform provider.
// The kube client itself is not created here; Run builds it.
func NewStore(cfg *config.PlatformProviderKubernetesConfig, pipedConfig *config.PipedSpec, platformProvider string, logger *zap.Logger) *Store {
	logger = logger.Named("kubernetes").
		With(zap.String("platform-provider", platformProvider))

	return &Store{
		config:      cfg,
		pipedConfig: pipedConfig,
		store: &store{
			pipedConfig: pipedConfig,
			apps:        make(map[string]*appNodes),
			resources:   make(map[string]appResource),
			iterators:   make(map[int]int, 1),
			logger:      logger.Named("store"),
		},
		firstSyncedCh: make(chan error, 1),
		logger:        logger,
	}
}

// Run builds the kubeconfig, starts the reflector and blocks until ctx
// is done. The ordering matters: the reflector must finish its first
// sync before store.initialize() runs and firstSyncedCh is closed,
// and stopCh is only closed after ctx is done to stop the informers.
func (s *Store) Run(ctx context.Context) error {
	s.logger.Info("start running kubernetes app state store")

	// Build kubeconfig for initialing kubernetes clients later.
	var err error
	s.kubeConfig, err = clientcmd.BuildConfigFromFlags(s.config.MasterURL, s.config.KubeConfigPath)
	if err != nil {
		s.logger.Error("failed to build kube config", zap.Error(err))
		return err
	}

	stopCh := make(chan struct{})
	rf := reflector{
		config:      s.config,
		kubeConfig:  s.kubeConfig,
		pipedConfig: s.pipedConfig,
		onAdd:       s.store.onAddResource,
		onUpdate:    s.store.onUpdateResource,
		onDelete:    s.store.onDeleteResource,
		stopCh:      stopCh,
		logger:      s.logger.Named("reflector"),
	}
	if err := rf.start(ctx); err != nil {
		s.firstSyncedCh <- err
		return err
	}
	s.watchingResourceKinds = rf.watchingResourceKinds
	s.logger.Info("the reflector has done the first sync")

	s.store.initialize()
	s.logger.Info("the store has done the initializing")
	close(s.firstSyncedCh)

	<-ctx.Done()
	close(stopCh)

	s.logger.Info("kubernetes app state store has been stopped")
	return nil
}

// WaitForReady blocks until the first sync finishes or the timeout
// elapses. NOTE(review): it returns nil (not an error) when the
// context/timeout expires before readiness — confirm callers expect
// to proceed in that case.
func (s *Store) WaitForReady(ctx context.Context, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	select {
	case <-ctx.Done():
		return nil
	case err := <-s.firstSyncedCh:
		return err
	}
}

// GetKubernetesAppLiveState returns the current live state of the application.
func (s *Store) GetKubernetesAppLiveState(appID string) (AppState, bool) {
	return s.store.getAppLiveState(appID)
}

// NewEventIterator creates a fresh iterator over resource state events.
func (s *Store) NewEventIterator() EventIterator {
	return s.store.newEventIterator()
}

// GetWatchingResourceKinds returns the kinds the reflector is watching
// (valid only after the first sync).
func (s *Store) GetWatchingResourceKinds() []provider.APIVersionKind {
	return s.watchingResourceKinds
}

// GetAppLiveManifests returns the live manifests of all resources
// currently tracked for the application.
func (s *Store) GetAppLiveManifests(appID string) []provider.Manifest {
	return s.store.GetAppLiveManifests(appID)
}
diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/kubernetesmetrics/metrics.go b/pkg/app/pipedv1/livestatestore/kubernetes/kubernetesmetrics/metrics.go
new file mode 100644
index 0000000000..0d4122e89d
--- /dev/null
+++ b/pkg/app/pipedv1/livestatestore/kubernetes/kubernetesmetrics/metrics.go
@@ -0,0 +1,99 @@
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package kubernetesmetrics + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/tools/metrics" +) + +const ( + hostKey = "host" + methodKey = "method" + codeKey = "code" + eventKey = "event" + eventHandledKey = "handled" +) + +type EventKind string + +const ( + LabelEventAdd EventKind = "add" + LabelEventUpdate EventKind = "update" + LabelEventDelete EventKind = "delete" +) + +type EventHandledVal string + +const ( + LabelEventHandled EventHandledVal = "true" + LabelEventNotYetHandled EventHandledVal = "false" +) + +var ( + apiRequestsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "livestatestore_kubernetes_api_requests_total", + Help: "Number of requests sent to kubernetes api server.", + }, + []string{ + hostKey, + methodKey, + codeKey, + }, + ) + resourceEventsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "livestatestore_kubernetes_resource_events_total", + Help: "Number of resource events received from kubernetes server.", + }, + []string{ + eventKey, + eventHandledKey, + }, + ) +) + +func Register(r prometheus.Registerer) { + r.MustRegister( + apiRequestsCounter, + resourceEventsCounter, + ) + + opts := metrics.RegisterOpts{ + RequestResult: requestResultCollector{}, + } + metrics.Register(opts) +} + +type requestResultCollector struct { +} + +func (c requestResultCollector) Increment(ctx context.Context, code string, method string, host string) { + apiRequestsCounter.With(prometheus.Labels{ + hostKey: host, + methodKey: method, + codeKey: code, + }).Inc() +} + +func IncResourceEventsCounter(event EventKind, handled EventHandledVal) { + resourceEventsCounter.With(prometheus.Labels{ + eventKey: string(event), + eventHandledKey: string(handled), + }).Inc() +} diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/reflector.go b/pkg/app/pipedv1/livestatestore/kubernetes/reflector.go new file mode 100644 index 0000000000..c77b45ed02 --- /dev/null +++ 
b/pkg/app/pipedv1/livestatestore/kubernetes/reflector.go @@ -0,0 +1,425 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/kubernetes/kubernetesmetrics" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" +) + +var ( + // This is the default whitelist of resources that should be watched. + // User can add/remove other resources to be watched in piped config at cloud provider part. 
+ groupWhitelist = map[string]struct{}{ + "": {}, + "apps": {}, + "extensions": {}, + "batch": {}, + "storage.k8s.io": {}, + "autoscaling": {}, + "networking.k8s.io": {}, + "apiextensions.k8s.io": {}, + "rbac.authorization.k8s.io": {}, + "policy": {}, + "apiregistration.k8s.io": {}, + "authorization.k8s.io": {}, + } + versionWhitelist = map[string]struct{}{ + "v1": {}, + "v1beta1": {}, + "v1beta2": {}, + "v2": {}, + } + kindWhitelist = map[string]struct{}{ + "Service": {}, + "Endpoints": {}, + "Deployment": {}, + "DaemonSet": {}, + "StatefulSet": {}, + "ReplicationController": {}, + "ReplicaSet": {}, + "Pod": {}, + "Job": {}, + "CronJob": {}, + "ConfigMap": {}, + "Secret": {}, + "Ingress": {}, + "NetworkPolicy": {}, + "StorageClass": {}, + "PersistentVolume": {}, + "PersistentVolumeClaim": {}, + "HorizontalPodAutoscaler": {}, + "ServiceAccount": {}, + "Role": {}, + "RoleBinding": {}, + "ClusterRole": {}, + "ClusterRoleBinding": {}, + "CustomResourceDefinition": {}, + "PodDisruptionBudget": {}, + "PodSecurityPolicy": {}, + "APIService": {}, + "LocalSubjectAccessReview": {}, + "SelfSubjectAccessReview": {}, + "SelfSubjectRulesReview": {}, + "SubjectAccessReview": {}, + "ResourceQuota": {}, + "PodTemplate": {}, + "IngressClass": {}, + "Namespace": {}, + } + ignoreResourceKeys = map[string]struct{}{ + "v1:Service:default:kubernetes": {}, + "v1:Service:kube-system:heapster": {}, + "v1:Service:kube-system:metrics-server": {}, + "v1:Service:kube-system:kube-dns": {}, + "v1:Service:kube-system:kubernetes-dashboard": {}, + "v1:Service:kube-system:default-http-backend": {}, + + "apps/v1:Deployment:kube-system:kube-dns": {}, + "apps/v1:Deployment:kube-system:kube-dns-autoscaler": {}, + "apps/v1:Deployment:kube-system:fluentd-gcp-scaler": {}, + "apps/v1:Deployment:kube-system:kubernetes-dashboard": {}, + "apps/v1:Deployment:kube-system:l7-default-backend": {}, + "apps/v1:Deployment:kube-system:heapster-gke": {}, + 
"apps/v1:Deployment:kube-system:stackdriver-metadata-agent-cluster-level": {}, + + "extensions/v1beta1:Deployment:kube-system:kube-dns": {}, + "extensions/v1beta1:Deployment:kube-system:kube-dns-autoscaler": {}, + "extensions/v1beta1:Deployment:kube-system:fluentd-gcp-scaler": {}, + "extensions/v1beta1:Deployment:kube-system:kubernetes-dashboard": {}, + "extensions/v1beta1:Deployment:kube-system:l7-default-backend": {}, + "extensions/v1beta1:Deployment:kube-system:heapster-gke": {}, + "extensions/v1beta1:Deployment:kube-system:stackdriver-metadata-agent-cluster-level": {}, + + "v1:Endpoints:kube-system:kube-controller-manager": {}, + "v1:Endpoints:kube-system:kube-scheduler": {}, + "v1:Endpoints:kube-system:vpa-recommender": {}, + "v1:Endpoints:kube-system:gcp-controller-manager": {}, + "v1:Endpoints:kube-system:managed-certificate-controller": {}, + "v1:Endpoints:kube-system:cluster-autoscaler": {}, + + "v1:ConfigMap:kube-system:cluster-kubestore": {}, + "v1:ConfigMap:kube-system:ingress-gce-lock": {}, + "v1:ConfigMap:kube-system:gke-common-webhook-lock": {}, + "v1:ConfigMap:kube-system:cluster-autoscaler-status": {}, + + "rbac.authorization.k8s.io/v1:ClusterRole::system:managed-certificate-controller": {}, + "rbac.authorization.k8s.io/v1:ClusterRoleBinding::system:managed-certificate-controller": {}, + } +) + +// reflector watches the live state of application with the cluster +// and triggers the specified callbacks. 
+type reflector struct {
+	// Configuration of the platform provider whose cluster is watched.
+	config *config.PlatformProviderKubernetesConfig
+	// REST configuration used to build the discovery and dynamic clients.
+	kubeConfig *restclient.Config
+	pipedConfig *config.PipedSpec
+
+	// Callbacks invoked for resource events observed in the cluster.
+	onAdd    func(obj *unstructured.Unstructured)
+	onUpdate func(oldObj, obj *unstructured.Unstructured)
+	onDelete func(obj *unstructured.Unstructured)
+
+	// Kinds that informers were actually started for (filled by start).
+	watchingResourceKinds []provider.APIVersionKind
+	stopCh                chan struct{}
+	logger                *zap.Logger
+}
+
+// start discovers the API resources supported by the cluster, filters them
+// against the configured matcher and the built-in whitelists, then runs a
+// dynamic informer for each remaining resource.
+func (r *reflector) start(_ context.Context) error {
+	matcher := newResourceMatcher(r.config.AppStateInformer)
+
+	// Use discovery to discover APIs supported by the Kubernetes API server.
+	// This should be run periodically with a low rate because the APIs are not added frequently.
+	// https://godoc.org/k8s.io/client-go/discovery
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(r.kubeConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create discovery client: %v", err)
+	}
+	groupResources, err := discoveryClient.ServerPreferredResources()
+	if err != nil {
+		return fmt.Errorf("failed to fetch preferred resources: %v", err)
+	}
+	r.logger.Info(fmt.Sprintf("successfully preferred resources that contains for %d groups", len(groupResources)))
+
+	// Filter above APIResources.
+	var (
+		targetResources           = make([]schema.GroupVersionResource, 0)
+		namespacedTargetResources = make([]schema.GroupVersionResource, 0)
+	)
+	for _, gr := range groupResources {
+		for _, resource := range gr.APIResources {
+			gvk := schema.FromAPIVersionAndKind(gr.GroupVersion, resource.Kind)
+			if !matcher.Match(gvk) {
+				r.logger.Info(fmt.Sprintf("skip watching %v because of not matching the configured list", gvk))
+				continue
+			}
+
+			// Informers require both the "list" and "watch" verbs.
+			if !isSupportedList(resource) || !isSupportedWatch(resource) {
+				r.logger.Info(fmt.Sprintf("skip watching %v because of not supporting watch or list verb", gvk))
+				continue
+			}
+
+			gv := gvk.GroupVersion()
+			r.watchingResourceKinds = append(r.watchingResourceKinds, provider.APIVersionKind{
+				APIVersion: gv.String(),
+				Kind:       gvk.Kind,
+			})
+			target := gv.WithResource(resource.Name)
+			if resource.Namespaced {
+				namespacedTargetResources = append(namespacedTargetResources, target)
+			} else {
+				targetResources = append(targetResources, target)
+			}
+		}
+	}
+	r.logger.Info("filtered target resources",
+		zap.Any("targetResources", targetResources),
+		zap.Any("namespacedTargetResources", namespacedTargetResources),
+	)
+
+	// Use dynamic to perform generic operations on arbitrary Kubernets API objects.
+	// https://godoc.org/k8s.io/client-go/dynamic
+	dynamicClient, err := dynamic.NewForConfig(r.kubeConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create dynamic client: %v", err)
+	}
+
+	stopCh := make(chan struct{})
+
+	// startInformer launches one informer per resource in the given namespace
+	// and waits for each informer cache to sync before starting the next.
+	startInformer := func(namespace string, resources []schema.GroupVersionResource) {
+		factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicClient, 30*time.Minute, namespace, nil)
+		for _, tr := range resources {
+			di := factory.ForResource(tr).Informer()
+			di.AddEventHandler(cache.ResourceEventHandlerFuncs{
+				AddFunc:    r.onObjectAdd,
+				UpdateFunc: r.onObjectUpdate,
+				DeleteFunc: r.onObjectDelete,
+			})
+			go di.Run(r.stopCh)
+			if cache.WaitForCacheSync(stopCh, di.HasSynced) {
+				r.logger.Info(fmt.Sprintf("informer cache for %v has been synced", tr))
+			} else {
+				// TODO: Handle the case informer cache has not been synced correctly.
+				r.logger.Info(fmt.Sprintf("informer cache for %v has not been synced correctly", tr))
+			}
+		}
+	}
+
+	ns := r.config.AppStateInformer.Namespace
+	if ns == "" {
+		ns = metav1.NamespaceAll
+	}
+	r.logger.Info(fmt.Sprintf("start running %d namespaced-resource informers", len(namespacedTargetResources)))
+	startInformer(ns, namespacedTargetResources)
+
+	// Non-namespaced resources are watched only when no specific namespace was configured.
+	if ns == metav1.NamespaceAll {
+		r.logger.Info(fmt.Sprintf("start running %d non-namespaced-resource informers", len(targetResources)))
+		startInformer(metav1.NamespaceAll, targetResources)
+	}
+
+	r.logger.Info("all informer caches have been synced")
+	return nil
+}
+
+// onObjectAdd handles an informer "add" event: it ignores predefined system
+// resources and resources owned by another piped, then forwards the object
+// to the configured onAdd callback. Every decision is counted in metrics.
+func (r *reflector) onObjectAdd(obj interface{}) {
+	u := obj.(*unstructured.Unstructured)
+	key := provider.MakeResourceKey(u)
+
+	// Ignore all predefined ones.
+	if _, ok := ignoreResourceKeys[key.String()]; ok {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventAdd,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	// Ignore all objects that are not handled by this piped.
+	pipedID := u.GetAnnotations()[provider.LabelPiped]
+	if pipedID != "" && pipedID != r.pipedConfig.PipedID {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventAdd,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	r.logger.Debug(fmt.Sprintf("received add event for %s", key.String()))
+	r.onAdd(u)
+	kubernetesmetrics.IncResourceEventsCounter(
+		kubernetesmetrics.LabelEventAdd,
+		kubernetesmetrics.LabelEventHandled,
+	)
+}
+
+// onObjectUpdate handles an informer "update" event with the same filtering
+// rules as onObjectAdd, forwarding both old and new objects to onUpdate.
+func (r *reflector) onObjectUpdate(oldObj, obj interface{}) {
+	u := obj.(*unstructured.Unstructured)
+	oldU := oldObj.(*unstructured.Unstructured)
+
+	// Ignore all predefined ones.
+	key := provider.MakeResourceKey(u)
+	if _, ok := ignoreResourceKeys[key.String()]; ok {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventUpdate,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	// Ignore all objects that are not handled by this piped.
+	pipedID := u.GetAnnotations()[provider.LabelPiped]
+	if pipedID != "" && pipedID != r.pipedConfig.PipedID {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventUpdate,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	r.logger.Debug(fmt.Sprintf("received update event for %s", key.String()))
+	r.onUpdate(oldU, u)
+	kubernetesmetrics.IncResourceEventsCounter(
+		kubernetesmetrics.LabelEventUpdate,
+		kubernetesmetrics.LabelEventHandled,
+	)
+}
+
+// onObjectDelete handles an informer "delete" event with the same filtering
+// rules as onObjectAdd, forwarding the object to the onDelete callback.
+func (r *reflector) onObjectDelete(obj interface{}) {
+	u := obj.(*unstructured.Unstructured)
+	key := provider.MakeResourceKey(u)
+
+	// Ignore all predefined ones.
+	if _, ok := ignoreResourceKeys[key.String()]; ok {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventDelete,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	// Ignore all objects that are not handled by this piped.
+	pipedID := u.GetAnnotations()[provider.LabelPiped]
+	if pipedID != "" && pipedID != r.pipedConfig.PipedID {
+		kubernetesmetrics.IncResourceEventsCounter(
+			kubernetesmetrics.LabelEventDelete,
+			kubernetesmetrics.LabelEventNotYetHandled,
+		)
+		return
+	}
+
+	r.logger.Debug(fmt.Sprintf("received delete event for %s", key.String()))
+	r.onDelete(u)
+	kubernetesmetrics.IncResourceEventsCounter(
+		kubernetesmetrics.LabelEventDelete,
+		kubernetesmetrics.LabelEventHandled,
+	)
+}
+
+// isSupportedWatch reports whether the API resource supports the "watch" verb.
+func isSupportedWatch(r metav1.APIResource) bool {
+	for _, v := range r.Verbs {
+		if v == "watch" {
+			return true
+		}
+	}
+	return false
+}
+
+// isSupportedList reports whether the API resource supports the "list" verb.
+func isSupportedList(r metav1.APIResource) bool {
+	for _, v := range r.Verbs {
+		if v == "list" {
+			return true
+		}
+	}
+	return false
+}
+
+// resourceMatcher decides whether a GroupVersionKind should be watched,
+// based on the include/exclude lists from the piped configuration.
+type resourceMatcher struct {
+	includes map[string]struct{}
+	excludes map[string]struct{}
+}
+
+// newResourceMatcher indexes the configured include/exclude matchers.
+// A matcher without Kind is keyed by its APIVersion only and matches every
+// kind of that APIVersion; otherwise the key is "APIVersion:Kind".
+func newResourceMatcher(cfg config.KubernetesAppStateInformer) *resourceMatcher {
+	r := &resourceMatcher{
+		includes: make(map[string]struct{}, len(cfg.IncludeResources)),
+		excludes: make(map[string]struct{}, len(cfg.ExcludeResources)),
+	}
+
+	for _, m := range cfg.IncludeResources {
+		if m.Kind == "" {
+			r.includes[m.APIVersion] = struct{}{}
+		} else {
+			r.includes[m.APIVersion+":"+m.Kind] = struct{}{}
+		}
+	}
+	for _, m := range cfg.ExcludeResources {
+		if m.Kind == "" {
+			r.excludes[m.APIVersion] = struct{}{}
+		} else {
+			r.excludes[m.APIVersion+":"+m.Kind] = struct{}{}
+		}
+	}
+	return r
+}
+
+// Match reports whether the given GroupVersionKind should be watched.
+// Excludes take precedence over includes; anything not explicitly listed
+// must pass all three built-in whitelists (kind, group and version).
+func (m *resourceMatcher) Match(gvk schema.GroupVersionKind) bool {
+	var (
+		gv         = gvk.GroupVersion()
+		apiVersion = gv.String()
+		key        = apiVersion + ":" + gvk.Kind
+	)
+
+	// Any resource matches the specified ExcludeResources will be ignored.
+	if _, ok := m.excludes[apiVersion]; ok {
+		return false
+	}
+	if _, ok := m.excludes[key]; ok {
+		return false
+	}
+
+	// Any resources matches the specified IncludeResources will be included.
+	if _, ok := m.includes[apiVersion]; ok {
+		return true
+	}
+	if _, ok := m.includes[key]; ok {
+		return true
+	}
+
+	// Check the predefined list.
+	if _, ok := kindWhitelist[gvk.Kind]; !ok {
+		return false
+	}
+	if _, ok := groupWhitelist[gv.Group]; !ok {
+		return false
+	}
+	if _, ok := versionWhitelist[gv.Version]; !ok {
+		return false
+	}
+
+	return true
+}
diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/reflector_test.go b/pkg/app/pipedv1/livestatestore/kubernetes/reflector_test.go
new file mode 100644
index 0000000000..99fa9b0dbc
--- /dev/null
+++ b/pkg/app/pipedv1/livestatestore/kubernetes/reflector_test.go
@@ -0,0 +1,89 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/pipe-cd/pipecd/pkg/config"
+)
+
+// TestResourceMatcher checks resourceMatcher.Match for three configurations:
+// the built-in whitelists (empty config), explicit IncludeResources and
+// explicit ExcludeResources.
+func TestResourceMatcher(t *testing.T) {
+	t.Parallel()
+
+	testcases := []struct {
+		name string
+		cfg  config.KubernetesAppStateInformer
+		// Map from a GroupVersionKind to whether Match is expected to accept it.
+		gvks map[schema.GroupVersionKind]bool
+	}{
+		{
+			name: "empty config",
+			cfg:  config.KubernetesAppStateInformer{},
+			gvks: map[schema.GroupVersionKind]bool{
+				{"pipecd.dev", "v1beta1", "Foo"}:       false,
+				{"", "v1", "Foo"}:                      false,
+				{"", "v1", "Service"}:                  true,
+				{"networking.k8s.io", "v1", "Ingress"}: true,
+			},
+		},
+		{
+			name: "include config",
+			cfg: config.KubernetesAppStateInformer{
+				IncludeResources: []config.KubernetesResourceMatcher{
+					{APIVersion: "pipecd.dev/v1beta1"},
+					{APIVersion: "pipecd.dev/v1alpha1", Kind: "Foo"},
+				},
+			},
+			gvks: map[schema.GroupVersionKind]bool{
+				{"pipecd.dev", "v1beta1", "Foo"}:  true,
+				{"pipecd.dev", "v1alpha1", "Foo"}: true,
+				{"pipecd.dev", "v1alpha1", "Bar"}: false,
+			},
+		},
+		{
+			name: "exclude config",
+			cfg: config.KubernetesAppStateInformer{
+				ExcludeResources: []config.KubernetesResourceMatcher{
+					{APIVersion: "networking.k8s.io/v1"},
+					{APIVersion: "apps/v1", Kind: "Deployment"},
+				},
+			},
+			gvks: map[schema.GroupVersionKind]bool{
+				{"apps", "v1", "ReplicaSet"}:           true,
+				{"apps", "v1", "Deployment"}:           false,
+				{"networking.k8s.io", "v1", "Ingress"}: false,
+			},
+		},
+	}
+
+	for _, tc := range testcases {
+		tc := tc
+		m := newResourceMatcher(tc.cfg)
+		for gvk, expected := range tc.gvks {
+			desc := fmt.Sprintf("%s: %v", tc.name, gvk)
+			// Capture range variables for the parallel subtest closures.
+			gvk, expected := gvk, expected
+			t.Run(desc, func(t *testing.T) {
+				t.Parallel()
+
+				matched := m.Match(gvk)
+				assert.Equal(t, expected, matched)
+			})
+		}
+	}
+}
diff --git a/pkg/app/pipedv1/livestatestore/kubernetes/store.go b/pkg/app/pipedv1/livestatestore/kubernetes/store.go
new file mode 100644
index 0000000000..e9ae9699df
--- /dev/null
+++ 
b/pkg/app/pipedv1/livestatestore/kubernetes/store.go @@ -0,0 +1,391 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "sync" + "time" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + eventCacheSize = 900 + eventCacheMaxSize = 1000 + eventCacheCleanOffset = 50 +) + +type store struct { + pipedConfig *config.PipedSpec + apps map[string]*appNodes + // The map with the key is "resource's uid" and the value is "appResource". + // Because the depended resource does not include the appID in its annotations + // so this is used to determine the application of a depended resource. + resources map[string]appResource + mu sync.RWMutex + + events []model.KubernetesResourceStateEvent + iterators map[int]int + nextIteratorID int + eventMu sync.Mutex + + logger *zap.Logger +} + +type appResource struct { + appID string + owners []metav1.OwnerReference + resource *unstructured.Unstructured +} + +func (s *store) initialize() { + s.mu.Lock() + defer s.mu.Unlock() + + now := time.Now() + // Try to determine the application ID of all resources. + for uid, an := range s.resources { + // Resource has already assigned into an application. 
+ if an.appID != "" { + continue + } + appID := s.findAppIDByOwners(an.owners) + if appID == "" { + continue + } + + // Add the missing resource into the dependedResources of the app. + key := provider.MakeResourceKey(an.resource) + + // Ignore in case appNodes with appID not existed in store. + if s.apps[appID] == nil { + s.logger.Info("detected an unexpected missing resource", + zap.String("app", appID), + zap.Any("key", key), + ) + continue + } + + s.apps[appID].addDependedResource(uid, key, an.resource, now) + an.appID = appID + s.resources[uid] = an + } + + // Remove all resources which do not have appID. + for uid, an := range s.resources { + if an.appID == "" { + delete(s.resources, uid) + } + } + + // Clean all initial events. + s.events = nil +} + +func (s *store) addResource(obj *unstructured.Unstructured, appID string) { + var ( + uid = string(obj.GetUID()) + key = provider.MakeResourceKey(obj) + owners = obj.GetOwnerReferences() + now = time.Now() + ) + + // If this is a resource managed by PipeCD + // it must contain appID in its annotations and has no owners. + if appID != "" && len(owners) == 0 { + // When this obj is for a new application + // we register a new application to the apps. + s.mu.Lock() + app, ok := s.apps[appID] + if !ok { + app = &appNodes{ + appID: appID, + managingNodes: make(map[string]node), + dependedNodes: make(map[string]node), + version: model.ApplicationLiveStateVersion{ + Timestamp: now.Unix(), + }, + } + s.apps[appID] = app + } + s.mu.Unlock() + + // Append the resource to the application's managingNodes. + if event, ok := app.addManagingResource(uid, key, obj, now); ok { + s.addEvent(event) + } + + // And update the resources. + s.mu.Lock() + s.resources[uid] = appResource{appID: appID, owners: owners, resource: obj} + s.mu.Unlock() + return + } + + // Try to determine the application ID by traveling its owners. 
+ if appID == "" { + s.mu.RLock() + appID = s.findAppIDByOwners(owners) + s.mu.RUnlock() + } + + // Append the resource to the application's dependedNodes. + if appID != "" { + s.mu.RLock() + app, ok := s.apps[appID] + s.mu.RUnlock() + if ok { + if event, ok := app.addDependedResource(uid, key, obj, now); ok { + s.addEvent(event) + } + } + } + + // And update the resources. + s.mu.Lock() + s.resources[uid] = appResource{appID: appID, owners: owners, resource: obj} + s.mu.Unlock() +} + +func (s *store) onAddResource(obj *unstructured.Unstructured) { + appID := obj.GetAnnotations()[provider.LabelApplication] + s.addResource(obj, appID) +} + +func (s *store) onUpdateResource(oldObj, obj *unstructured.Unstructured) { + uid := string(obj.GetUID()) + appID := obj.GetAnnotations()[provider.LabelApplication] + // Depended nodes may not contain the app id in its annotations. + // In that case, preventing them from overwriting with an empty id + if appID == "" { + s.mu.RLock() + if r, ok := s.resources[uid]; ok { + appID = r.appID + } + s.mu.RUnlock() + } + s.addResource(obj, appID) +} + +func (s *store) onDeleteResource(obj *unstructured.Unstructured) { + var ( + uid = string(obj.GetUID()) + appID = obj.GetAnnotations()[provider.LabelApplication] + key = provider.MakeResourceKey(obj) + owners = obj.GetOwnerReferences() + now = time.Now() + ) + + // If this is a resource managed by PipeCD + // it must contain appID in its annotations and has no owners. + if appID != "" && len(owners) == 0 { + s.mu.Lock() + delete(s.resources, uid) + s.mu.Unlock() + + s.mu.RLock() + app, ok := s.apps[appID] + s.mu.RUnlock() + if ok { + if event, ok := app.deleteManagingResource(uid, key, now); ok { + s.addEvent(event) + } + } + return + } + + // Handle depended nodes from here. + if appID == "" { + s.mu.RLock() + if r, ok := s.resources[uid]; ok { + appID = r.appID + } + s.mu.RUnlock() + } + + // Try to determine the application ID by traveling its owners. 
+ if appID == "" { + s.mu.RLock() + appID = s.findAppIDByOwners(owners) + s.mu.RUnlock() + } + + // This must be done before deleting the resource from the dependedNodes + // to ensure that all items in the resources list can be found from one of the app. + s.mu.Lock() + delete(s.resources, uid) + s.mu.Unlock() + + // Delete the resource to the application's dependedNodes. + s.mu.RLock() + app, ok := s.apps[appID] + s.mu.RUnlock() + if ok { + if event, ok := app.deleteDependedResource(uid, key, now); ok { + s.addEvent(event) + } + } +} + +func (s *store) getAppManagingNodes(appID string) map[string]node { + s.mu.RLock() + app, ok := s.apps[appID] + s.mu.RUnlock() + + if !ok { + return nil + } + return app.getManagingNodes() +} + +func (s *store) findAppIDByOwners(owners []metav1.OwnerReference) string { + for _, ref := range owners { + owner, ok := s.resources[string(ref.UID)] + // Owner does not present in the resources. + if !ok { + continue + } + // The owner is containing the appID. + if owner.appID != "" { + return owner.appID + } + // Try with the owners of the owner. 
+		if appID := s.findAppIDByOwners(owner.owners); appID != "" {
+			return appID
+		}
+	}
+	return ""
+}
+
+// getAppLiveState returns a snapshot of all resource states (managing and
+// depended) of the given application together with the state version.
+func (s *store) getAppLiveState(appID string) (AppState, bool) {
+	s.mu.RLock()
+	app, ok := s.apps[appID]
+	s.mu.RUnlock()
+
+	if !ok {
+		return AppState{}, false
+	}
+
+	var (
+		nodes, version = app.getNodes()
+		resources      = make([]*model.KubernetesResourceState, 0, len(nodes))
+	)
+	for i := range nodes {
+		state := nodes[i].state
+		resources = append(resources, &state)
+	}
+
+	return AppState{
+		Resources: resources,
+		Version:   version,
+	}, true
+}
+
+// GetAppLiveManifests returns the live manifests of all resources that are
+// directly managed by the given application.
+func (s *store) GetAppLiveManifests(appID string) []provider.Manifest {
+	s.mu.RLock()
+	app, ok := s.apps[appID]
+	s.mu.RUnlock()
+
+	if !ok {
+		return nil
+	}
+	nodes := app.getManagingNodes()
+	manifests := make([]provider.Manifest, 0, len(nodes))
+	for i := range nodes {
+		manifests = append(manifests, nodes[i].Manifest())
+	}
+	return manifests
+}
+
+// addEvent appends a new state event to the cache and trims the cache down
+// to eventCacheSize entries once it reaches eventCacheMaxSize.
+func (s *store) addEvent(event model.KubernetesResourceStateEvent) {
+	s.eventMu.Lock()
+	defer s.eventMu.Unlock()
+
+	s.events = append(s.events, event)
+	if len(s.events) < eventCacheMaxSize {
+		return
+	}
+
+	num := len(s.events) - eventCacheSize
+	s.removeOldEvents(num)
+}
+
+// nextEvents returns up to maxNum events the given iterator has not consumed
+// yet and advances the iterator position past the returned events.
+func (s *store) nextEvents(iteratorID, maxNum int) []model.KubernetesResourceStateEvent {
+	s.eventMu.Lock()
+	defer s.eventMu.Unlock()
+
+	var (
+		from   = s.iterators[iteratorID]
+		to     = len(s.events)
+		length = to - from
+	)
+	if length <= 0 {
+		return nil
+	}
+	if length > maxNum {
+		// Return exactly maxNum events: the slice bound is exclusive, so
+		// "from + maxNum - 1" would hand back one event fewer than requested.
+		to = from + maxNum
+	}
+
+	events := s.events[from:to]
+	s.iterators[iteratorID] = to
+
+	s.cleanStaleEvents()
+	return events
+}
+
+// cleanStaleEvents drops events that every registered iterator has already
+// consumed, once at least eventCacheCleanOffset of them have accumulated.
+func (s *store) cleanStaleEvents() {
+	// Find the smallest iterator position; everything before it is stale.
+	// min must be seeded from the first iterator value: positions are
+	// non-negative, so a zero-initialized min would never be updated and
+	// no stale event would ever be removed here.
+	min := -1
+	for _, v := range s.iterators {
+		if min == -1 || v < min {
+			min = v
+		}
+	}
+	if min < eventCacheCleanOffset {
+		return
+	}
+	s.removeOldEvents(min)
+}
+
+// removeOldEvents drops the oldest num events and shifts every iterator
+// position back by the same amount, clamping at zero.
+func (s *store) removeOldEvents(num int) {
+	if len(s.events) < num {
+		return
+	}
+	// Drop exactly num events. Slicing from "num-1" kept one stale event
+	// and desynchronized the "- num" iterator adjustment below, making
+	// every iterator replay an already-consumed event after each trim.
+	s.events = s.events[num:]
+	for k := range s.iterators {
+		newIndex := s.iterators[k] - num
+		if newIndex < 0 {
+			newIndex = 0
+		}
+		s.iterators[k] = newIndex
+	}
+}
+
+// newEventIterator registers and returns a new iterator for consuming
+// resource state events from the cache, starting at position zero.
+func (s *store) newEventIterator() EventIterator {
+	s.eventMu.Lock()
+	id := s.nextIteratorID
+	s.nextIteratorID++
+	s.eventMu.Unlock()
+
+	return EventIterator{
+		id:    id,
+		store: s,
+	}
+}
diff --git a/pkg/app/pipedv1/livestatestore/lambda/store.go b/pkg/app/pipedv1/livestatestore/lambda/store.go
new file mode 100644
index 0000000000..e9a6fb2aa0
--- /dev/null
+++ b/pkg/app/pipedv1/livestatestore/lambda/store.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lambda
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"github.com/pipe-cd/pipecd/pkg/config"
+	"github.com/pipe-cd/pipecd/pkg/model"
+)
+
+type applicationLister interface {
+	List() []*model.Application
+}
+
+type Store struct {
+	logger *zap.Logger
+}
+
+type Getter interface {
+}
+
+func NewStore(cfg *config.PlatformProviderLambdaConfig, platformProvider string, appLister applicationLister, logger *zap.Logger) *Store {
+	logger = logger.Named("lambda").
+ With(zap.String("platform-provider", platformProvider)) + + return &Store{ + logger: logger, + } +} + +func (s *Store) Run(ctx context.Context) error { + s.logger.Info("start running lambda app state store") + + s.logger.Info("lambda app state store has been stopped") + return nil +} diff --git a/pkg/app/pipedv1/livestatestore/livestatestore.go b/pkg/app/pipedv1/livestatestore/livestatestore.go index fce4845235..1632abec99 100644 --- a/pkg/app/pipedv1/livestatestore/livestatestore.go +++ b/pkg/app/pipedv1/livestatestore/livestatestore.go @@ -25,6 +25,12 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/cloudrun" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/ecs" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/lambda" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/livestatestore/terraform" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/model" ) @@ -34,7 +40,11 @@ type applicationLister interface { } type Getter interface { - // TODO: generic getter methods + CloudRunGetter(platformProvider string) (cloudrun.Getter, bool) + ECSRunGetter(platformProvider string) (ecs.Getter, bool) + KubernetesGetter(platformProvider string) (kubernetes.Getter, bool) + LambdaGetter(platformProvider string) (lambda.Getter, bool) + TerraformGetter(platformProvider string) (terraform.Getter, bool) } type Store interface { @@ -42,9 +52,40 @@ type Store interface { Getter() Getter } +type kubernetesStore interface { + Run(ctx context.Context) error + kubernetes.Getter +} + +type terraformStore interface { + Run(ctx context.Context) error +} + +type cloudRunStore interface { + Run(ctx context.Context) error + cloudrun.Getter +} + +type lambdaStore interface { + Run(ctx context.Context) error +} + +type ecsStore interface { 
+ Run(ctx context.Context) error +} + // store manages a list of particular stores for all cloud providers. type store struct { - // TODO: generic store fields + // Map thats contains a list of kubernetesStore where key is the cloud provider name. + kubernetesStores map[string]kubernetesStore + // Map thats contains a list of terraformStore where key is the cloud provider name. + terraformStores map[string]terraformStore + // Map thats contains a list of cloudRunStore where key is the cloud provider name. + cloudrunStores map[string]cloudRunStore + // Map thats contains a list of lambdaStore where key is the cloud provider name. + lambdaStores map[string]lambdaStore + // Map thats contains a list of ecsStore where key is the cloud provider name. + ecsStores map[string]ecsStore gracePeriod time.Duration logger *zap.Logger @@ -54,11 +95,40 @@ func NewStore(ctx context.Context, cfg *config.PipedSpec, appLister applicationL logger = logger.Named("livestatestore") s := &store{ + kubernetesStores: make(map[string]kubernetesStore), + terraformStores: make(map[string]terraformStore), + cloudrunStores: make(map[string]cloudRunStore), + lambdaStores: make(map[string]lambdaStore), + ecsStores: make(map[string]ecsStore), gracePeriod: gracePeriod, logger: logger, } for _, cp := range cfg.PlatformProviders { - _ = cp // TODO: general state from plugin from store fields + switch cp.Type { + case model.PlatformProviderKubernetes: + store := kubernetes.NewStore(cp.KubernetesConfig, cfg, cp.Name, logger) + s.kubernetesStores[cp.Name] = store + + case model.PlatformProviderTerraform: + store := terraform.NewStore(cp.TerraformConfig, cp.Name, appLister, logger) + s.terraformStores[cp.Name] = store + + case model.PlatformProviderCloudRun: + store, err := cloudrun.NewStore(ctx, cp.CloudRunConfig, cp.Name, logger) + if err != nil { + logger.Error("failed to create a new cloudrun's livestatestore", zap.Error(err)) + continue + } + s.cloudrunStores[cp.Name] = store + + case 
model.PlatformProviderLambda: + store := lambda.NewStore(cp.LambdaConfig, cp.Name, appLister, logger) + s.lambdaStores[cp.Name] = store + + case model.PlatformProviderECS: + store := ecs.NewStore(cp.ECSConfig, cp.Name, appLister, logger) + s.ecsStores[cp.Name] = store + } } return s @@ -69,6 +139,41 @@ func (s *store) Run(ctx context.Context) error { group, ctx := errgroup.WithContext(ctx) + for i := range s.kubernetesStores { + cpName := i + group.Go(func() error { + return s.kubernetesStores[cpName].Run(ctx) + }) + } + + for i := range s.terraformStores { + cpName := i + group.Go(func() error { + return s.terraformStores[cpName].Run(ctx) + }) + } + + for i := range s.cloudrunStores { + cpName := i + group.Go(func() error { + return s.cloudrunStores[cpName].Run(ctx) + }) + } + + for i := range s.lambdaStores { + cpName := i + group.Go(func() error { + return s.lambdaStores[cpName].Run(ctx) + }) + } + + for i := range s.ecsStores { + cpName := i + group.Go(func() error { + return s.ecsStores[cpName].Run(ctx) + }) + } + err := group.Wait() if err == nil { s.logger.Info("all state stores have been stopped") @@ -82,6 +187,39 @@ func (s *store) Getter() Getter { return s } +func (s *store) CloudRunGetter(platformProvider string) (cloudrun.Getter, bool) { + ks, ok := s.cloudrunStores[platformProvider] + return ks, ok +} + +func (s *store) ECSRunGetter(platformProvider string) (ecs.Getter, bool) { + ks, ok := s.ecsStores[platformProvider] + return ks, ok +} + +func (s *store) KubernetesGetter(platformProvider string) (kubernetes.Getter, bool) { + ks, ok := s.kubernetesStores[platformProvider] + return ks, ok +} + +func (s *store) LambdaGetter(platformProvider string) (lambda.Getter, bool) { + ks, ok := s.lambdaStores[platformProvider] + return ks, ok +} + +func (s *store) TerraformGetter(platformProvider string) (terraform.Getter, bool) { + ks, ok := s.terraformStores[platformProvider] + return ks, ok +} + type LiveResourceLister struct { Getter } + +func (g 
LiveResourceLister) ListKubernetesAppLiveResources(platformProvider, appID string) ([]provider.Manifest, bool) { + kg, ok := g.KubernetesGetter(platformProvider) + if !ok { + return nil, false + } + return kg.GetAppLiveManifests(appID), true +} diff --git a/pkg/app/pipedv1/livestatestore/terraform/store.go b/pkg/app/pipedv1/livestatestore/terraform/store.go new file mode 100644 index 0000000000..7f1a74a931 --- /dev/null +++ b/pkg/app/pipedv1/livestatestore/terraform/store.go @@ -0,0 +1,51 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terraform + +import ( + "context" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +type applicationLister interface { + List() []*model.Application +} + +type Store struct { + logger *zap.Logger +} + +type Getter interface { +} + +func NewStore(cfg *config.PlatformProviderTerraformConfig, platformProvider string, appLister applicationLister, logger *zap.Logger) *Store { + logger = logger.Named("terraform"). 
+ With(zap.String("platform-provider", platformProvider)) + + return &Store{ + logger: logger, + } +} + +func (s *Store) Run(ctx context.Context) error { + s.logger.Info("start running terraform app state store") + + s.logger.Info("terraform app state store has been stopped") + return nil +} diff --git a/pkg/app/pipedv1/planner/cloudrun/cloudrun.go b/pkg/app/pipedv1/planner/cloudrun/cloudrun.go new file mode 100644 index 0000000000..e52c1c7b8e --- /dev/null +++ b/pkg/app/pipedv1/planner/cloudrun/cloudrun.go @@ -0,0 +1,156 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "fmt" + "io" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/model" +) + +// Planner plans the deployment pipeline for CloudRun application. +type Planner struct { +} + +type registerer interface { + Register(k model.ApplicationKind, p planner.Planner) error +} + +// Register registers this planner into the given registerer. +func Register(r registerer) { + r.Register(model.ApplicationKind_CLOUDRUN, &Planner{}) +} + +// Plan decides which pipeline should be used for the given input. 
+func (p *Planner) Plan(ctx context.Context, in planner.Input) (out planner.Output, err error) { + ds, err := in.TargetDSP.Get(ctx, io.Discard) + if err != nil { + err = fmt.Errorf("error while preparing deploy source data (%v)", err) + return + } + + cfg := ds.ApplicationConfig.CloudRunApplicationSpec + if cfg == nil { + err = fmt.Errorf("missing CloudRunApplicationSpec in application configuration") + return + } + + // Determine application version from the manifest. + if version, e := p.determineVersion(ds.AppDir, cfg.Input.ServiceManifestFile); e != nil { + out.Version = "unknown" + in.Logger.Warn("unable to determine target version", zap.Error(e)) + } else { + out.Version = version + } + + if versions, e := p.determineVersions(ds.AppDir, cfg.Input.ServiceManifestFile); e != nil || len(versions) == 0 { + in.Logger.Warn("unable to determine target versions", zap.Error(e)) + out.Versions = []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_UNKNOWN, + Version: "unknown", + }, + } + } else { + out.Versions = versions + } + + autoRollback := *cfg.Input.AutoRollback + + // In case the strategy has been decided by trigger. + // For example: user triggered the deployment via web console. + switch in.Trigger.SyncStrategy { + case model.SyncStrategy_QUICK_SYNC: + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + case model.SyncStrategy_PIPELINE: + if cfg.Pipeline == nil { + err = fmt.Errorf("unable to force sync with pipeline because no pipeline was specified") + return + } + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + } + + // When no pipeline was configured, do the quick sync. 
+ if cfg.Pipeline == nil || len(cfg.Pipeline.Stages) == 0 { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy image %s and configure all traffic to it (pipeline was not configured)", out.Version) + return + } + + // Force to use pipeline when the alwaysUsePipeline field was configured. + if cfg.Planner.AlwaysUsePipeline { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline (alwaysUsePipeline was set)" + return + } + + // This is the first time to deploy this application or it was unable to retrieve that value. + // We just do the quick sync. + if in.MostRecentSuccessfulCommitHash == "" { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy image %s and configure all traffic to it (it seems this is the first deployment)", out.Version) + return + } + + // Load service manifest at the last deployed commit to decide running version. 
+ ds, err = in.RunningDSP.Get(ctx, io.Discard) + if err == nil { + if lastVersion, e := p.determineVersion(ds.AppDir, cfg.Input.ServiceManifestFile); e == nil { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Sync with pipeline to update image from %s to %s", lastVersion, out.Version) + return + } + } + + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline" + return +} + +func (p *Planner) determineVersion(appDir, serviceManifestFile string) (string, error) { + sm, err := provider.LoadServiceManifest(appDir, serviceManifestFile) + if err != nil { + return "", err + } + + return provider.FindImageTag(sm) +} + +func (p *Planner) determineVersions(appDir, serviceManifestFile string) ([]*model.ArtifactVersion, error) { + sm, err := provider.LoadServiceManifest(appDir, serviceManifestFile) + if err != nil { + return nil, err + } + + return provider.FindArtifactVersions(sm) +} diff --git a/pkg/app/pipedv1/planner/cloudrun/pipeline.go b/pkg/app/pipedv1/planner/cloudrun/pipeline.go new file mode 100644 index 0000000000..8795958ad8 --- /dev/null +++ b/pkg/app/pipedv1/planner/cloudrun/pipeline.go @@ -0,0 +1,120 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "fmt" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + stage, _ = planner.GetPredefinedStage(planner.PredefinedStageCloudRunSync) + stages = []config.PipelineStage{stage} + out = make([]*model.PipelineStage, 0, len(stages)) + ) + + for i, s := range stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: true, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} + +func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + out = make([]*model.PipelineStage, 0, len(pp.Stages)) + ) + + for i, s := range pp.Stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: false, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = 
[]string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} diff --git a/pkg/app/pipedv1/planner/ecs/ecs.go b/pkg/app/pipedv1/planner/ecs/ecs.go new file mode 100644 index 0000000000..d75511dce4 --- /dev/null +++ b/pkg/app/pipedv1/planner/ecs/ecs.go @@ -0,0 +1,156 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "context" + "fmt" + "io" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/ecs" + "github.com/pipe-cd/pipecd/pkg/model" +) + +// Planner plans the deployment pipeline for ECS application. +type Planner struct { +} + +type registerer interface { + Register(k model.ApplicationKind, p planner.Planner) error +} + +// Register registers this planner into the given registerer. +func Register(r registerer) { + r.Register(model.ApplicationKind_ECS, &Planner{}) +} + +// Plan decides which pipeline should be used for the given input. 
+func (p *Planner) Plan(ctx context.Context, in planner.Input) (out planner.Output, err error) { + ds, err := in.TargetDSP.Get(ctx, io.Discard) + if err != nil { + err = fmt.Errorf("error while preparing deploy source data (%v)", err) + return + } + + cfg := ds.ApplicationConfig.ECSApplicationSpec + if cfg == nil { + err = fmt.Errorf("missing ECSApplicationSpec in application configuration") + return + } + + // Determine application version from the task definition + if version, e := determineVersion(ds.AppDir, cfg.Input.TaskDefinitionFile); e != nil { + out.Version = "unknown" + in.Logger.Warn("unable to determine target version", zap.Error(e)) + } else { + out.Version = version + } + + if versions, e := determineVersions(ds.AppDir, cfg.Input.TaskDefinitionFile); e != nil || len(versions) == 0 { + in.Logger.Warn("unable to determine target versions", zap.Error(e)) + out.Versions = []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_UNKNOWN, + Version: "unknown", + }, + } + } else { + out.Versions = versions + } + + autoRollback := *cfg.Input.AutoRollback + + // In case the strategy has been decided by trigger. + // For example: user triggered the deployment via web console. + switch in.Trigger.SyncStrategy { + case model.SyncStrategy_QUICK_SYNC: + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + case model.SyncStrategy_PIPELINE: + if cfg.Pipeline == nil { + err = fmt.Errorf("unable to force sync with pipeline because no pipeline was specified") + return + } + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + } + + // When no pipeline was configured, perform the quick sync. 
+ if cfg.Pipeline == nil || len(cfg.Pipeline.Stages) == 0 { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy image %s and configure all traffic to it (pipeline was not configured)", out.Version) + return + } + + // Force to use pipeline when the alwaysUsePipeline field was configured. + if cfg.Planner.AlwaysUsePipeline { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline (alwaysUsePipeline was set)" + return + } + + // If this is the first time to deploy this application or it was unable to retrieve last successful commit, + // we perform the quick sync strategy. + if in.MostRecentSuccessfulCommitHash == "" { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy image %s and configure all traffic to it (it seems this is the first deployment)", out.Version) + return + } + + // Load service manifest at the last deployed commit to decide running version. 
+ ds, err = in.RunningDSP.Get(ctx, io.Discard) + if err == nil { + if lastVersion, e := determineVersion(ds.AppDir, cfg.Input.TaskDefinitionFile); e == nil { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Sync with pipeline to update image from %s to %s", lastVersion, out.Version) + return + } + } + + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline" + return +} + +func determineVersion(appDir, taskDefinitonFile string) (string, error) { + taskDefinition, err := provider.LoadTaskDefinition(appDir, taskDefinitonFile) + if err != nil { + return "", err + } + + return provider.FindImageTag(taskDefinition) +} + +func determineVersions(appDir, taskDefinitonFile string) ([]*model.ArtifactVersion, error) { + taskDefinition, err := provider.LoadTaskDefinition(appDir, taskDefinitonFile) + if err != nil { + return nil, err + } + + return provider.FindArtifactVersions(taskDefinition) +} diff --git a/pkg/app/pipedv1/planner/ecs/pipeline.go b/pkg/app/pipedv1/planner/ecs/pipeline.go new file mode 100644 index 0000000000..f3e123384f --- /dev/null +++ b/pkg/app/pipedv1/planner/ecs/pipeline.go @@ -0,0 +1,120 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "fmt" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + stage, _ = planner.GetPredefinedStage(planner.PredefinedStageECSSync) + stages = []config.PipelineStage{stage} + out = make([]*model.PipelineStage, 0, len(stages)) + ) + + for i, s := range stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: true, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} + +func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + out = make([]*model.PipelineStage, 0, len(pp.Stages)) + ) + + for i, s := range pp.Stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: false, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = 
[]string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} diff --git a/pkg/app/pipedv1/planner/ecs/pipeline_test.go b/pkg/app/pipedv1/planner/ecs/pipeline_test.go new file mode 100644 index 0000000000..e169dce236 --- /dev/null +++ b/pkg/app/pipedv1/planner/ecs/pipeline_test.go @@ -0,0 +1,54 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestBuildQuickSyncPipeline(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantAutoRollback bool + }{ + { + name: "want auto rollback stage", + wantAutoRollback: true, + }, + { + name: "don't want auto rollback stage", + wantAutoRollback: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + stages := buildQuickSyncPipeline(tc.wantAutoRollback, time.Now()) + var autoRollback bool + for _, stage := range stages { + if stage.Name == string(model.StageRollback) { + autoRollback = true + } + } + assert.Equal(t, tc.wantAutoRollback, autoRollback) + }) + } +} diff --git a/pkg/app/pipedv1/planner/kubernetes/kubernetes.go b/pkg/app/pipedv1/planner/kubernetes/kubernetes.go new file mode 100644 index 0000000000..0d29766094 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/kubernetes.go @@ -0,0 +1,538 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/resource" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/diff" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + versionUnknown = "unknown" +) + +// Planner plans the deployment pipeline for kubernetes application. +type Planner struct { +} + +type registerer interface { + Register(k model.ApplicationKind, p planner.Planner) error +} + +// Register registers this planner into the given registerer. +func Register(r registerer) { + r.Register(model.ApplicationKind_KUBERNETES, &Planner{}) +} + +// Plan decides which pipeline should be used for the given input. +func (p *Planner) Plan(ctx context.Context, in planner.Input) (out planner.Output, err error) { + ds, err := in.TargetDSP.Get(ctx, io.Discard) + if err != nil { + err = fmt.Errorf("error while preparing deploy source data (%v)", err) + return + } + cfg := ds.ApplicationConfig.KubernetesApplicationSpec + if cfg == nil { + err = fmt.Errorf("missing KubernetesApplicationSpec in application configuration") + return + } + + if cfg.Input.HelmChart != nil { + chartRepoName := cfg.Input.HelmChart.Repository + if chartRepoName != "" { + cfg.Input.HelmChart.Insecure = in.PipedConfig.IsInsecureChartRepository(chartRepoName) + } + } + + manifestCache := provider.AppManifestsCache{ + AppID: in.ApplicationID, + Cache: in.AppManifestsCache, + Logger: in.Logger, + } + + // Load previous deployed manifests and new manifests to compare. + newManifests, ok := manifestCache.Get(in.Trigger.Commit.Hash) + if !ok { + // When the manifests were not in the cache we have to load them. 
+ loader := provider.NewLoader(in.ApplicationName, ds.AppDir, ds.RepoDir, in.GitPath.ConfigFilename, cfg.Input, in.GitClient, in.Logger) + newManifests, err = loader.LoadManifests(ctx) + if err != nil { + return + } + manifestCache.Put(in.Trigger.Commit.Hash, newManifests) + } + + // Determine application version from the manifests. + if version, e := determineVersion(newManifests); e != nil { + in.Logger.Warn("unable to determine version", zap.Error(e)) + out.Version = versionUnknown + } else { + out.Version = version + } + + if versions, e := determineVersions(newManifests); e != nil || len(versions) == 0 { + in.Logger.Warn("unable to determine versions", zap.Error(e)) + out.Versions = []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_UNKNOWN, + Version: versionUnknown, + }, + } + } else { + out.Versions = versions + } + + autoRollback := *cfg.Input.AutoRollback + + // In case the strategy has been decided by trigger. + // For example: user triggered the deployment via web console. + switch in.Trigger.SyncStrategy { + case model.SyncStrategy_QUICK_SYNC: + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + case model.SyncStrategy_PIPELINE: + if cfg.Pipeline == nil { + err = fmt.Errorf("unable to force sync with pipeline because no pipeline was specified") + return + } + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + } + + // If the progressive pipeline was not configured + // we have only one choice, which is applying all manifests. 
+ if cfg.Pipeline == nil || len(cfg.Pipeline.Stages) == 0 { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = "Quick sync by applying all manifests (no pipeline was configured)" + return + } + + // Force to use pipeline when the alwaysUsePipeline field was configured. + if cfg.Planner.AlwaysUsePipeline { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline (alwaysUsePipeline was set)" + return + } + + // This deployment is triggered by a commit with the intent to perform pipeline. + // Commit Matcher will be ignored when triggered by a command. + if p := cfg.CommitMatcher.Pipeline; p != "" && in.Trigger.Commander == "" { + pipelineRegex, err := in.RegexPool.Get(p) + if err != nil { + err = fmt.Errorf("failed to compile commitMatcher.pipeline(%s): %w", p, err) + return out, err + } + if pipelineRegex.MatchString(in.Trigger.Commit.Message) { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Sync progressively because the commit message was matching %q", p) + return out, err + } + } + + // This deployment is triggered by a commit with the intent to synchronize. + // Commit Matcher will be ignored when triggered by a command. 
+ if s := cfg.CommitMatcher.QuickSync; s != "" && in.Trigger.Commander == "" { + syncRegex, err := in.RegexPool.Get(s) + if err != nil { + err = fmt.Errorf("failed to compile commitMatcher.sync(%s): %w", s, err) + return out, err + } + if syncRegex.MatchString(in.Trigger.Commit.Message) { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync by applying all manifests because the commit message was matching %q", s) + return out, err + } + } + + // This is the first time to deploy this application + // or it was unable to retrieve that value. + // We just apply all manifests. + if in.MostRecentSuccessfulCommitHash == "" { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = "Quick sync by applying all manifests because it seems this is the first deployment" + return + } + + // Load manifests of the previously applied commit. + oldManifests, ok := manifestCache.Get(in.MostRecentSuccessfulCommitHash) + if !ok { + // When the manifests were not in the cache we have to load them. 
+ var runningDs *deploysource.DeploySource + runningDs, err = in.RunningDSP.Get(ctx, io.Discard) + if err != nil { + err = fmt.Errorf("failed to prepare the running deploy source data (%v)", err) + return + } + + runningCfg := runningDs.ApplicationConfig.KubernetesApplicationSpec + if runningCfg == nil { + err = fmt.Errorf("unable to find the running configuration (%v)", err) + return + } + loader := provider.NewLoader(in.ApplicationName, runningDs.AppDir, runningDs.RepoDir, in.GitPath.ConfigFilename, runningCfg.Input, in.GitClient, in.Logger) + oldManifests, err = loader.LoadManifests(ctx) + if err != nil { + err = fmt.Errorf("failed to load previously deployed manifests: %w", err) + return + } + manifestCache.Put(in.MostRecentSuccessfulCommitHash, oldManifests) + } + + progressive, desc := decideStrategy(oldManifests, newManifests, cfg.Workloads, in.Logger) + out.Summary = desc + + if progressive { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + return + } + + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + return +} + +// First up, checks to see if the workload's `spec.template` has been changed, +// and then checks if the configmap/secret's data. 
+func decideStrategy(olds, news []provider.Manifest, workloadRefs []config.K8sResourceReference, logger *zap.Logger) (progressive bool, desc string) { + oldWorkloads := findWorkloadManifests(olds, workloadRefs) + if len(oldWorkloads) == 0 { + desc = "Quick sync by applying all manifests because it was unable to find the currently running workloads" + return + } + newWorkloads := findWorkloadManifests(news, workloadRefs) + if len(newWorkloads) == 0 { + desc = "Quick sync by applying all manifests because it was unable to find workloads in the new manifests" + return + } + + workloads := findUpdatedWorkloads(oldWorkloads, newWorkloads) + diffs := make(map[provider.ResourceKey]diff.Nodes, len(workloads)) + + for _, w := range workloads { + // If the workload's pod template was touched + // do progressive deployment with the specified pipeline. + diffResult, err := provider.Diff(w.old, w.new, logger) + if err != nil { + progressive = true + desc = fmt.Sprintf("Sync progressively due to an error while calculating the diff (%v)", err) + return + } + diffNodes := diffResult.Nodes() + diffs[w.new.Key] = diffNodes + + templateDiffs := diffNodes.FindByPrefix("spec.template") + if len(templateDiffs) > 0 { + progressive = true + + if msg, changed := checkImageChange(templateDiffs); changed { + desc = msg + return + } + + desc = fmt.Sprintf("Sync progressively because pod template of workload %s was changed", w.new.Key.Name) + return + } + } + + // If the config/secret was touched, we also need to do progressive + // deployment to check run with the new config/secret content. 
+ oldConfigs := findConfigs(olds) + newConfigs := findConfigs(news) + if len(oldConfigs) > len(newConfigs) { + progressive = true + desc = fmt.Sprintf("Sync progressively because %d configmap/secret deleted", len(oldConfigs)-len(newConfigs)) + return + } + if len(oldConfigs) < len(newConfigs) { + progressive = true + desc = fmt.Sprintf("Sync progressively because new %d configmap/secret added", len(newConfigs)-len(oldConfigs)) + return + } + for k, oc := range oldConfigs { + nc, ok := newConfigs[k] + if !ok { + progressive = true + desc = fmt.Sprintf("Sync progressively because %s %s was deleted", oc.Key.Kind, oc.Key.Name) + return + } + result, err := provider.Diff(oc, nc, logger) + if err != nil { + progressive = true + desc = fmt.Sprintf("Sync progressively due to an error while calculating the diff (%v)", err) + return + } + if result.HasDiff() { + progressive = true + desc = fmt.Sprintf("Sync progressively because %s %s was updated", oc.Key.Kind, oc.Key.Name) + return + } + } + + // Check if this is a scaling commit. + scales := make([]string, 0, len(diffs)) + for k, d := range diffs { + if before, after, changed := checkReplicasChange(d); changed { + scales = append(scales, fmt.Sprintf("%s/%s from %s to %s", k.Kind, k.Name, before, after)) + } + + } + sort.Strings(scales) + if len(scales) > 0 { + desc = fmt.Sprintf("Quick sync to scale %s", strings.Join(scales, ", ")) + return + } + + desc = "Quick sync by applying all manifests" + return +} + +func findWorkloadManifests(manifests []provider.Manifest, refs []config.K8sResourceReference) []provider.Manifest { + if len(refs) == 0 { + return findManifests(provider.KindDeployment, "", manifests) + } + + workloads := make([]provider.Manifest, 0) + for _, ref := range refs { + kind := provider.KindDeployment + if ref.Kind != "" { + kind = ref.Kind + } + ms := findManifests(kind, ref.Name, manifests) + workloads = append(workloads, ms...) 
+ } + return workloads +} + +func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest { + out := make([]provider.Manifest, 0, len(manifests)) + for _, m := range manifests { + if m.Key.Kind != kind { + continue + } + if name != "" && m.Key.Name != name { + continue + } + out = append(out, m) + } + return out +} + +type workloadPair struct { + old provider.Manifest + new provider.Manifest +} + +func findUpdatedWorkloads(olds, news []provider.Manifest) []workloadPair { + pairs := make([]workloadPair, 0) + oldMap := make(map[provider.ResourceKey]provider.Manifest, len(olds)) + nomalizeKey := func(k provider.ResourceKey) provider.ResourceKey { + // Ignoring APIVersion because user can upgrade to the new APIVersion for the same workload. + k.APIVersion = "" + if k.Namespace == provider.DefaultNamespace { + k.Namespace = "" + } + return k + } + for _, m := range olds { + key := nomalizeKey(m.Key) + oldMap[key] = m + } + for _, n := range news { + key := nomalizeKey(n.Key) + if o, ok := oldMap[key]; ok { + pairs = append(pairs, workloadPair{ + old: o, + new: n, + }) + } + } + return pairs +} + +func findConfigs(manifests []provider.Manifest) map[provider.ResourceKey]provider.Manifest { + configs := make(map[provider.ResourceKey]provider.Manifest) + for _, m := range manifests { + if m.Key.IsConfigMap() { + configs[m.Key] = m + } + if m.Key.IsSecret() { + configs[m.Key] = m + } + } + return configs +} + +func checkImageChange(ns diff.Nodes) (string, bool) { + const containerImageQuery = `^spec\.template\.spec\.containers\.\d+.image$` + nodes, _ := ns.Find(containerImageQuery) + if len(nodes) == 0 { + return "", false + } + + images := make([]string, 0, len(ns)) + for _, n := range nodes { + beforeImg := parseContainerImage(n.StringX()) + afterImg := parseContainerImage(n.StringY()) + + if beforeImg.name == afterImg.name { + images = append(images, fmt.Sprintf("image %s from %s to %s", beforeImg.name, beforeImg.tag, afterImg.tag)) + } else { 
+ images = append(images, fmt.Sprintf("image %s:%s to %s:%s", beforeImg.name, beforeImg.tag, afterImg.name, afterImg.tag)) + } + } + desc := fmt.Sprintf("Sync progressively because of updating %s", strings.Join(images, ", ")) + return desc, true +} + +func checkReplicasChange(ns diff.Nodes) (before, after string, changed bool) { + const replicasQuery = `^spec\.replicas$` + node, err := ns.FindOne(replicasQuery) + if err != nil { + return + } + + before = node.StringX() + after = node.StringY() + changed = true + return +} + +type containerImage struct { + name string + tag string +} + +func parseContainerImage(image string) (img containerImage) { + parts := strings.Split(image, ":") + if len(parts) == 2 { + img.tag = parts[1] + } + paths := strings.Split(parts[0], "/") + img.name = paths[len(paths)-1] + return +} + +// determineVersion decides running version of an application based on its manifests. +// Currently, this shows the tag values of using container images. +// In case only one container is used, its tag value will be returned. +// +// TODO: Add ability to configure how to determine application version. +func determineVersion(manifests []provider.Manifest) (string, error) { + images := make([]containerImage, 0) + + for _, m := range manifests { + if !m.Key.IsDeployment() { + continue + } + data, err := m.MarshalJSON() + if err != nil { + return "", err + } + var d resource.Deployment + if err := json.Unmarshal(data, &d); err != nil { + return "", err + } + + containers := d.Spec.Template.Spec.Containers + for _, c := range containers { + images = append(images, parseContainerImage(c.Image)) + } + } + + if len(images) == 0 { + return versionUnknown, nil + } + + // In case the workload is containing only one container + // return only the tag name. 
+ if len(images) == 1 { + return images[0].tag, nil + } + + // In case multiple containers are used + // return version in format: "tag-1 (name-1), tag-2 (name-2)" + var b strings.Builder + b.WriteString(fmt.Sprintf("%s (%s)", images[0].tag, images[0].name)) + + for _, img := range images[1:] { + b.WriteString(fmt.Sprintf(", %s (%s)", img.tag, img.name)) + } + + return b.String(), nil +} + +// determineVersions decides artifact versions of an application. +// It finds all container images that are being specified in the workload manifests then returns their names, version numbers, and urls. +func determineVersions(manifests []provider.Manifest) ([]*model.ArtifactVersion, error) { + imageMap := map[string]struct{}{} + for _, m := range manifests { + // TODO: Determine container image version from other workload kinds such as StatefulSet, Pod, Daemon, CronJob... + if !m.Key.IsDeployment() { + continue + } + data, err := m.MarshalJSON() + if err != nil { + return nil, err + } + var d resource.Deployment + if err := json.Unmarshal(data, &d); err != nil { + return nil, err + } + + containers := d.Spec.Template.Spec.Containers + // Remove duplicate images on multiple manifests. + for _, c := range containers { + imageMap[c.Image] = struct{}{} + } + } + + versions := make([]*model.ArtifactVersion, 0, len(imageMap)) + for i := range imageMap { + image := parseContainerImage(i) + versions = append(versions, &model.ArtifactVersion{ + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: image.tag, + Name: image.name, + Url: i, + }) + } + + return versions, nil +} diff --git a/pkg/app/pipedv1/planner/kubernetes/kubernetes_test.go b/pkg/app/pipedv1/planner/kubernetes/kubernetes_test.go new file mode 100644 index 0000000000..2b3933bb59 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/kubernetes_test.go @@ -0,0 +1,605 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestDecideStrategy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + olds []provider.Manifest + news []provider.Manifest + workloadRefs []config.K8sResourceReference + wantProgressive bool + wantDesc string + }{ + { + name: "no workload in the old commit", + news: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, + }, + }, + wantProgressive: false, + wantDesc: "Quick sync by applying all manifests because it was unable to find the currently running workloads", + }, + { + name: "no workload in the new commit", + olds: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, + }, + }, + news: []provider.Manifest{ + { + Key: provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindService, + }, + }, + }, + wantProgressive: false, + wantDesc: "Quick sync by applying all manifests because it was unable to find workloads in the new manifests", + }, + { + name: 
"pod template was changed", + olds: func() []provider.Manifest { + m := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo"}}}, + ) + return []provider.Manifest{m} + }(), + news: func() []provider.Manifest { + m := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "bar"}}}, + ) + return []provider.Manifest{m} + }(), + wantProgressive: true, + wantDesc: "Sync progressively because pod template of workload name was changed", + }, + { + name: "mutilple workloads: pod template was changed", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-1"}}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-2"}}}, + ) + return []provider.Manifest{m1, m2} + }(), + news: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-1"}}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "bar-2"}}}, + ) + return 
[]provider.Manifest{m1, m2} + }(), + wantProgressive: true, + wantDesc: "Sync progressively because pod template of workload name-2 was changed", + }, + { + name: "changed deployment was not the target", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-1"}}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-2"}}}, + ) + return []provider.Manifest{m1, m2} + }(), + news: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "foo-1"}}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{"template": "bar-2"}}}, + ) + return []provider.Manifest{m1, m2} + }(), + workloadRefs: []config.K8sResourceReference{ + { + Kind: provider.KindDeployment, + Name: "name-1", + }, + }, + wantProgressive: false, + wantDesc: "Quick sync by applying all manifests", + }, + { + name: "scale one deployment", + olds: func() []provider.Manifest { + m := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "foo", + "replicas": 1, + }}}, + ) + return []provider.Manifest{m} + }(), + news: func() []provider.Manifest { + m 
:= provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "foo", + "replicas": 2, + }}}, + ) + return []provider.Manifest{m} + }(), + wantProgressive: false, + wantDesc: "Quick sync to scale Deployment/name from 1 to 2", + }, + { + name: "scale multiple deployments", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "foo", + "replicas": 1, + }}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "bar", + "replicas": 20, + }}}, + ) + return []provider.Manifest{m1, m2} + }(), + news: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "foo", + "replicas": 5, + }}}, + ) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + Name: "name-2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"spec": map[string]interface{}{ + "template": "bar", + "replicas": 10, + }}}, + ) + return []provider.Manifest{m1, m2} + }(), + wantProgressive: false, + wantDesc: "Quick sync to scale Deployment/name-1 from 1 to 5, Deployment/name-2 from 20 to 10", + }, + { + name: "configmap deleted", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: 
provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + }, &unstructured.Unstructured{}) + return []provider.Manifest{m1, m2} + }(), + news: func() []provider.Manifest { + m := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + return []provider.Manifest{m} + }(), + wantProgressive: true, + wantDesc: "Sync progressively because 1 configmap/secret deleted", + }, + { + name: "new configmap added", + olds: func() []provider.Manifest { + m := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + return []provider.Manifest{m} + }(), + news: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + }, &unstructured.Unstructured{}) + return []provider.Manifest{m1, m2} + }(), + wantProgressive: true, + wantDesc: "Sync progressively because new 1 configmap/secret added", + }, + { + name: "one configmap updated", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "foo"}}, + ) + m3 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "baz"}}, + ) + return []provider.Manifest{m1, m2, m3} + }(), + news: func() 
[]provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "bar"}}, + ) + m3 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "baz"}}, + ) + return []provider.Manifest{m1, m2, m3} + }(), + wantProgressive: true, + wantDesc: "Sync progressively because ConfigMap configmap1 was updated", + }, + { + name: "all configmaps as is", + olds: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "foo"}}, + ) + m3 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap2", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "baz"}}, + ) + return []provider.Manifest{m1, m2, m3} + }(), + news: func() []provider.Manifest { + m1 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "apps/v1", + Kind: provider.KindDeployment, + }, &unstructured.Unstructured{}) + m2 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap1", + }, &unstructured.Unstructured{ + Object: map[string]interface{}{"data": "foo"}}, + ) + m3 := provider.MakeManifest(provider.ResourceKey{ + APIVersion: "v1", + Kind: provider.KindConfigMap, + Name: "configmap2", + }, &unstructured.Unstructured{ + 
Object: map[string]interface{}{"data": "baz"}}, + ) + return []provider.Manifest{m1, m2, m3} + }(), + wantProgressive: false, + wantDesc: "Quick sync by applying all manifests", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + gotProgressive, gotDesc := decideStrategy(tc.olds, tc.news, tc.workloadRefs, zap.NewNop()) + assert.Equal(t, tc.wantProgressive, gotProgressive) + assert.Equal(t, tc.wantDesc, gotDesc) + }) + } +} + +func TestDetermineVersion(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + expected string + expectedError error + }{ + { + name: "no workload", + manifests: "testdata/version_no_workload.yaml", + expected: "unknown", + }, + { + name: "single container", + manifests: "testdata/version_single_container.yaml", + expected: "v1.0.0", + }, + { + name: "multiple containers", + manifests: "testdata/version_multi_containers.yaml", + expected: "v1.0.0 (helloworld), v0.6.0 (my-service)", + }, + { + name: "multiple workloads", + manifests: "testdata/version_multi_workloads.yaml", + expected: "v1.0.0 (helloworld), v0.5.0 (my-service)", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifests) + require.NoError(t, err) + + version, err := determineVersion(manifests) + assert.Equal(t, tc.expected, version) + assert.Equal(t, tc.expectedError, err) + }) + } +} + +func TestDetermineVersions(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + expected []*model.ArtifactVersion + expectedError error + }{ + { + name: "no workload", + manifests: "testdata/version_no_workload.yaml", + expected: []*model.ArtifactVersion{}, + }, + { + name: "single container", + manifests: "testdata/version_single_container.yaml", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: 
"gcr.io/pipecd/helloworld:v1.0.0", + }, + }, + }, + { + name: "multiple containers", + manifests: "testdata/version_multi_containers.yaml", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v0.6.0", + Name: "my-service", + Url: "gcr.io/pipecd/my-service:v0.6.0", + }, + }, + }, + { + name: "multiple workloads", + manifests: "testdata/version_multi_workloads.yaml", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v0.5.0", + Name: "my-service", + Url: "gcr.io/pipecd/my-service:v0.5.0", + }, + }, + }, + { + name: "multiple workloads using same container image", + manifests: "testdata/version_multi_workloads_same_image.yaml", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := provider.LoadManifestsFromYAMLFile(tc.manifests) + require.NoError(t, err) + + versions, err := determineVersions(manifests) + assert.ElementsMatch(t, tc.expected, versions) + assert.Equal(t, tc.expectedError, err) + }) + } +} + +func TestCheckImageChange(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + oldManifests string + newManifests string + msg string + changed bool + expectedError error + }{ + { + name: "no diff", + msg: "", + oldManifests: "testdata/version_multi_containers.yaml", + newManifests: "testdata/version_multi_containers.yaml", + changed: false, + }, + { + name: "change only tag", + oldManifests: "testdata/check_image_tag/old.yaml", + 
newManifests: "testdata/check_image_tag/new.yaml", + msg: "Sync progressively because of updating image foo from v0.1 to v0.2", + changed: true, + }, + { + name: "change name and tag", + oldManifests: "testdata/check_image_name_tag/old.yaml", + newManifests: "testdata/check_image_name_tag/new.yaml", + msg: "Sync progressively because of updating image foo:v0.1 to bar:v0.2", + changed: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + oldManifests, err := provider.LoadManifestsFromYAMLFile(tc.oldManifests) + require.NoError(t, err) + + newManifests, err := provider.LoadManifestsFromYAMLFile(tc.newManifests) + require.NoError(t, err) + + workloads := findUpdatedWorkloads(oldManifests, newManifests) + for _, w := range workloads { + diffResult, err := provider.Diff(w.old, w.new, zap.NewNop()) + require.NoError(t, err) + diffNodes := diffResult.Nodes() + templateDiffs := diffNodes.FindByPrefix("spec.template") + + msg, changed := checkImageChange(templateDiffs) + + assert.Equal(t, tc.msg, msg) + assert.Equal(t, tc.changed, changed) + } + }) + } +} diff --git a/pkg/app/pipedv1/planner/kubernetes/pipeline.go b/pkg/app/pipedv1/planner/kubernetes/pipeline.go new file mode 100644 index 0000000000..16230fee1b --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/pipeline.go @@ -0,0 +1,146 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + stage, _ = planner.GetPredefinedStage(planner.PredefinedStageK8sSync) + stages = []config.PipelineStage{stage} + out = make([]*model.PipelineStage, 0, len(stages)) + ) + + for i, s := range stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: true, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} + +func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + out = make([]*model.PipelineStage, 0, len(pp.Stages)) + ) + + for i, s := range pp.Stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: false, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + 
stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + + // Add a stage for rolling back script run stages. + for i, s := range pp.Stages { + if s.Name == model.StageScriptRun { + // Use metadata as a way to pass parameters to the stage. + envStr, _ := json.Marshal(s.ScriptRunStageOptions.Env) + metadata := map[string]string{ + "baseStageID": out[i].Id, + "onRollback": s.ScriptRunStageOptions.OnRollback, + "env": string(envStr), + } + ss, _ := planner.GetPredefinedStage(planner.PredefinedStageScriptRunRollback) + out = append(out, &model.PipelineStage{ + Id: ss.ID, + Name: ss.Name.String(), + Desc: ss.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: metadata, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + } + } + + return out +} diff --git a/pkg/app/pipedv1/planner/kubernetes/pipeline_test.go b/pkg/app/pipedv1/planner/kubernetes/pipeline_test.go new file mode 100644 index 0000000000..b290e908e3 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/pipeline_test.go @@ -0,0 +1,85 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestBuildQuickSyncPipeline(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantAutoRollback bool + }{ + { + name: "want auto rollback stage", + wantAutoRollback: true, + }, + { + name: "don't want auto rollback stage", + wantAutoRollback: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + gotStages := buildQuickSyncPipeline(tc.wantAutoRollback, time.Now()) + var gotAutoRollback bool + for _, stage := range gotStages { + if stage.Name == string(model.StageRollback) { + gotAutoRollback = true + } + } + assert.Equal(t, tc.wantAutoRollback, gotAutoRollback) + }) + } +} + +func TestBuildProgressivePipeline(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantAutoRollback bool + }{ + { + name: "want auto rollback stage", + wantAutoRollback: true, + }, + { + name: "don't want auto rollback stage", + wantAutoRollback: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + gotStages := buildProgressivePipeline(&config.DeploymentPipeline{}, tc.wantAutoRollback, time.Now()) + var gotAutoRollback bool + for _, stage := range gotStages { + if stage.Name == string(model.StageRollback) { + gotAutoRollback = true + } + } + assert.Equal(t, tc.wantAutoRollback, gotAutoRollback) + }) + } +} diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/new.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/new.yaml new file mode 100644 index 0000000000..3481df64cd --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/new.yaml @@ -0,0 +1,15 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: foo +spec: + selector: 
+ matchLabels: + app: foo + replicas: 1 + template: + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: foo + image: fugafuga.io/bar:v0.2 diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/old.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/old.yaml new file mode 100644 index 0000000000..61b5d47fc1 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_name_tag/old.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: foo +spec: + selector: + matchLabels: + app: foo + replicas: 1 + template: + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: foo + image: hogehoge.io/foo:v0.1 + env: + - name: test_env + valueFrom: + configMapKeyRef: + name: test_env_name + key: test_env_key diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/new.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/new.yaml new file mode 100644 index 0000000000..ef9d5e9b97 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/new.yaml @@ -0,0 +1,15 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: foo +spec: + selector: + matchLabels: + app: foo + replicas: 1 + template: + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: foo + image: fugafuga.io/foo:v0.2 diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/old.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/old.yaml new file mode 100644 index 0000000000..61b5d47fc1 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/check_image_tag/old.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: foo +spec: + selector: + matchLabels: + app: foo + replicas: 1 + template: + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: foo + image: hogehoge.io/foo:v0.1 + env: + - name: test_env + valueFrom: + configMapKeyRef: + name: test_env_name + key: 
test_env_key diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_containers.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_containers.yaml new file mode 100644 index 0000000000..b2bdaa2069 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_containers.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hello + - hi + ports: + - containerPort: 9085 + - name: my-service + image: gcr.io/pipecd/my-service:v0.6.0 + args: + - hi + ports: + - containerPort: 9090 +--- +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads.yaml new file mode 100644 index 0000000000..62e7a94b83 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hello + - hi + ports: + - containerPort: 9085 +--- +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-service + labels: + pipecd.dev/managed-by: piped + app: simple +spec: + replicas: 2 + selector: + matchLabels: 
+ app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/my-service:v0.5.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads_same_image.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads_same_image.yaml new file mode 100644 index 0000000000..21abdf39d5 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/version_multi_workloads_same_image.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hello + - hi + ports: + - containerPort: 9085 +--- +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-service + labels: + pipecd.dev/managed-by: piped + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/planner/kubernetes/testdata/version_no_workload.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/version_no_workload.yaml new file mode 100644 index 0000000000..e4fa7be29f --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/version_no_workload.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 diff --git 
a/pkg/app/pipedv1/planner/kubernetes/testdata/version_single_container.yaml b/pkg/app/pipedv1/planner/kubernetes/testdata/version_single_container.yaml new file mode 100644 index 0000000000..da191e7d46 --- /dev/null +++ b/pkg/app/pipedv1/planner/kubernetes/testdata/version_single_container.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hello + - hi + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/planner/lambda/lambda.go b/pkg/app/pipedv1/planner/lambda/lambda.go new file mode 100644 index 0000000000..b066973b63 --- /dev/null +++ b/pkg/app/pipedv1/planner/lambda/lambda.go @@ -0,0 +1,171 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lambda + +import ( + "context" + "fmt" + "io" + "time" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/lambda" + "github.com/pipe-cd/pipecd/pkg/model" +) + +// Planner plans the deployment pipeline for Lambda application. +type Planner struct { +} + +type registerer interface { + Register(k model.ApplicationKind, p planner.Planner) error +} + +// Register registers this planner into the given registerer. +func Register(r registerer) { + r.Register(model.ApplicationKind_LAMBDA, &Planner{}) +} + +// Plan decides which pipeline should be used for the given input. +func (p *Planner) Plan(ctx context.Context, in planner.Input) (out planner.Output, err error) { + ds, err := in.TargetDSP.Get(ctx, io.Discard) + if err != nil { + err = fmt.Errorf("error while preparing deploy source data (%v)", err) + return + } + + cfg := ds.ApplicationConfig.LambdaApplicationSpec + if cfg == nil { + err = fmt.Errorf("missing LambdaApplicationSpec in application configuration") + return + } + + // Determine application version from the manifest + if version, e := determineVersion(ds.AppDir, cfg.Input.FunctionManifestFile); e != nil { + out.Version = "unknown" + in.Logger.Warn("unable to determine target version", zap.Error(e)) + } else { + out.Version = version + } + + if versions, e := determineVersions(ds.AppDir, cfg.Input.FunctionManifestFile); e != nil || len(versions) == 0 { + in.Logger.Warn("unable to determine target versions", zap.Error(e)) + out.Versions = []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_UNKNOWN, + Version: "unknown", + }, + } + } else { + out.Versions = versions + } + + autoRollback := *cfg.Input.AutoRollback + + // In case the strategy has been decided by trigger. + // For example: user triggered the deployment via web console. 
+ switch in.Trigger.SyncStrategy { + case model.SyncStrategy_QUICK_SYNC: + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + case model.SyncStrategy_PIPELINE: + if cfg.Pipeline == nil { + err = fmt.Errorf("unable to force sync with pipeline because no pipeline was specified") + return + } + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = in.Trigger.StrategySummary + return + } + + // When no pipeline was configured, perform the quick sync. + if cfg.Pipeline == nil || len(cfg.Pipeline.Stages) == 0 { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy version %s and configure all traffic to it (pipeline was not configured)", out.Version) + return + } + + // Force to use pipeline when the alwaysUsePipeline field was configured. + if cfg.Planner.AlwaysUsePipeline { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline (alwaysUsePipeline was set)" + return + } + + // If this is the first time to deploy this application or it was unable to retrieve last successful commit, + // we perform the quick sync strategy. + if in.MostRecentSuccessfulCommitHash == "" { + out.SyncStrategy = model.SyncStrategy_QUICK_SYNC + out.Stages = buildQuickSyncPipeline(autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Quick sync to deploy version %s and configure all traffic to it (it seems this is the first deployment)", out.Version) + return + } + + // Load service manifest at the last deployed commit to decide running version. 
+ ds, err = in.RunningDSP.Get(ctx, io.Discard) + if err == nil { + if lastVersion, e := determineVersion(ds.AppDir, cfg.Input.FunctionManifestFile); e == nil { + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = fmt.Sprintf("Sync with pipeline to update version from %s to %s", lastVersion, out.Version) + return + } + } + + out.SyncStrategy = model.SyncStrategy_PIPELINE + out.Stages = buildProgressivePipeline(cfg.Pipeline, autoRollback, time.Now()) + out.Summary = "Sync with the specified pipeline" + return +} + +func determineVersion(appDir, functionManifestFile string) (string, error) { + fm, err := provider.LoadFunctionManifest(appDir, functionManifestFile) + if err != nil { + return "", err + } + + // Extract container image tag as application version. + if fm.Spec.ImageURI != "" { + return provider.FindImageTag(fm) + } + + // Extract s3 object version as application version. + if fm.Spec.S3ObjectVersion != "" { + return fm.Spec.S3ObjectVersion, nil + } + + // Extract source code commitish as application version. + if fm.Spec.SourceCode.Ref != "" { + return fm.Spec.SourceCode.Ref, nil + } + + return "", fmt.Errorf("unable to determine version from manifest") +} + +func determineVersions(appDir, functionManifestFile string) ([]*model.ArtifactVersion, error) { + fm, err := provider.LoadFunctionManifest(appDir, functionManifestFile) + if err != nil { + return nil, err + } + + return provider.FindArtifactVersions(fm) +} diff --git a/pkg/app/pipedv1/planner/lambda/pipeline.go b/pkg/app/pipedv1/planner/lambda/pipeline.go new file mode 100644 index 0000000000..a6a44222e8 --- /dev/null +++ b/pkg/app/pipedv1/planner/lambda/pipeline.go @@ -0,0 +1,138 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda + +import ( + "fmt" + "time" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + stage, _ = planner.GetPredefinedStage(planner.PredefinedStageLambdaSync) + stages = []config.PipelineStage{stage} + out = make([]*model.PipelineStage, 0, len(stages)) + ) + + for i, s := range stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: true, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + + return out +} + +func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + out = make([]*model.PipelineStage, 0, len(pp.Stages)) + ) + + 
shouldRollbackCustomSync := false + for i, s := range pp.Stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: false, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + if s.Name == model.StageCustomSync { + shouldRollbackCustomSync = true + } + out = append(out, stage) + } + + if autoRollback { + if shouldRollbackCustomSync { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageCustomSyncRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } else { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + } + + return out +} diff --git a/pkg/app/pipedv1/planner/planner.go b/pkg/app/pipedv1/planner/planner.go new file mode 100644 index 0000000000..e3e6a244d8 --- /dev/null +++ b/pkg/app/pipedv1/planner/planner.go @@ -0,0 +1,77 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package planner provides a piped component
// that decides the deployment pipeline of a deployment.
// The planner bases on the changes from git commits
// then builds the deployment manifests to know the behavior of the deployment.
// From that behavior the planner can decides which pipeline should be applied.
package planner

import (
	"context"
	"strings"

	"go.uber.org/zap"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource"
	"github.com/pipe-cd/pipecd/pkg/cache"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/git"
	"github.com/pipe-cd/pipecd/pkg/model"
	"github.com/pipe-cd/pipecd/pkg/regexpool"
)

// Planner decides the deployment pipeline (sync strategy, stages, version
// information) for one deployment of a specific application kind.
type Planner interface {
	Plan(ctx context.Context, in Input) (Output, error)
}

// gitClient is the minimal git capability a planner needs:
// cloning a repository to a local destination.
type gitClient interface {
	Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}

// Input carries everything a Planner needs to plan one deployment.
type Input struct {
	// Identity of the application being deployed.
	ApplicationID   string
	ApplicationName string
	GitPath         model.ApplicationGitPath
	// Trigger describes what caused this deployment (commit, commander, forced strategy).
	Trigger model.DeploymentTrigger
	// MostRecentSuccessfulCommitHash is empty when this is the first deployment
	// or when the last successful commit could not be retrieved.
	MostRecentSuccessfulCommitHash string
	PipedConfig                    *config.PipedSpec
	// TargetDSP provides the deploy source at the commit being deployed;
	// RunningDSP provides it at the last successfully deployed commit.
	TargetDSP         deploysource.Provider
	RunningDSP        deploysource.Provider
	AppManifestsCache cache.Cache
	RegexPool         *regexpool.Pool
	GitClient         gitClient
	Logger            *zap.Logger
}

// Output is the result of planning: the decided strategy, the stages to
// run, and the detected application version(s).
type Output struct {
	Version      string
	Versions     []*model.ArtifactVersion
	SyncStrategy model.SyncStrategy
	Summary      string
	Stages       []*model.PipelineStage
}

// MakeInitialStageMetadata makes the initial metadata for the given stage configuration.
// Currently only WAIT_APPROVAL stages carry initial metadata (the approver list);
// all other stages start with nil metadata.
func MakeInitialStageMetadata(cfg config.PipelineStage) map[string]string {
	switch cfg.Name {
	case model.StageWaitApproval:
		return map[string]string{
			"Approvers": strings.Join(cfg.WaitApprovalStageOptions.Approvers, ","),
		}
	default:
		return nil
	}
}

// ---- pkg/app/pipedv1/planner/predefined_stages.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package planner

import (
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// IDs of the stages that are predefined by piped itself (as opposed to
// stages declared in a user's pipeline configuration).
const (
	PredefinedStageK8sSync            = "K8sSync"
	PredefinedStageTerraformSync      = "TerraformSync"
	PredefinedStageCloudRunSync       = "CloudRunSync"
	PredefinedStageLambdaSync         = "LambdaSync"
	PredefinedStageECSSync            = "ECSSync"
	PredefinedStageRollback           = "Rollback"
	PredefinedStageCustomSyncRollback = "CustomSyncRollback"
	PredefinedStageScriptRunRollback  = "ScriptRunRollback"
)

// predefinedStages maps each predefined stage ID to its stage configuration.
var predefinedStages = map[string]config.PipelineStage{
	PredefinedStageK8sSync: {
		ID:   PredefinedStageK8sSync,
		Name: model.StageK8sSync,
		Desc: "Sync by applying all manifests",
	},
	PredefinedStageTerraformSync: {
		ID:   PredefinedStageTerraformSync,
		Name: model.StageTerraformSync,
		Desc: "Sync by automatically applying any detected changes",
	},
	PredefinedStageCloudRunSync: {
		ID:   PredefinedStageCloudRunSync,
		Name: model.StageCloudRunSync,
		Desc: "Deploy the new version and configure all traffic to it",
	},
	PredefinedStageLambdaSync: {
		ID:   PredefinedStageLambdaSync,
		Name: model.StageLambdaSync,
		Desc: "Deploy the new version and configure all traffic to it",
	},
	PredefinedStageECSSync: {
		ID:   PredefinedStageECSSync,
		Name: model.StageECSSync,
		Desc: "Deploy the new version and configure all traffic to it",
	},
	PredefinedStageRollback: {
		ID:   PredefinedStageRollback,
		Name: model.StageRollback,
		Desc: "Rollback the deployment",
	},
	PredefinedStageCustomSyncRollback: {
		ID:   PredefinedStageCustomSyncRollback,
		Name: model.StageCustomSyncRollback,
		Desc: "Rollback the custom stages",
	},
	PredefinedStageScriptRunRollback: {
		ID:   PredefinedStageScriptRunRollback,
		Name: model.StageScriptRunRollback,
		Desc: "Rollback the script run stage",
	},
}

// GetPredefinedStage finds and returns the predefined stage for the given id.
func GetPredefinedStage(id string) (config.PipelineStage, bool) {
	stage, ok := predefinedStages[id]
	return stage, ok
}

// ---- pkg/app/pipedv1/planner/registry/registry.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package registry

import (
	"fmt"
	"sync"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/cloudrun"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/ecs"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/kubernetes"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/lambda"
	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/terraform"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// Registry is the read-only view of the planner registry:
// it resolves a Planner for a given application kind.
type Registry interface {
	Planner(k model.ApplicationKind) (planner.Planner, bool)
}

// registry is the concrete registry implementation; the RWMutex guards
// the planners map because Register and Planner may be called from
// different goroutines.
type registry struct {
	planners map[model.ApplicationKind]planner.Planner
	mu       sync.RWMutex
}

// Register adds a planner for the given kind; registering the same kind
// twice is an error.
func (r *registry) Register(k model.ApplicationKind, p planner.Planner) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	if _, ok := r.planners[k]; ok {
		return fmt.Errorf("planner for %v application kind has already been registered", k)
	}
	r.planners[k] = p
	return nil
}

// Planner returns the planner registered for the given kind,
// or false when no planner is registered for it.
func (r *registry) Planner(k model.ApplicationKind) (planner.Planner, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()

	p, ok := r.planners[k]
	if !ok {
		return nil, false
	}
	return p, true
}

var defaultRegistry = &registry{
	planners: make(map[model.ApplicationKind]planner.Planner),
}

// DefaultRegistry returns the process-wide registry populated by init.
func DefaultRegistry() Registry {
	return defaultRegistry
}

// init registers all planners to the default registry.
func init() {
	cloudrun.Register(defaultRegistry)
	kubernetes.Register(defaultRegistry)
	lambda.Register(defaultRegistry)
	terraform.Register(defaultRegistry)
	ecs.Register(defaultRegistry)
}

// ---- pkg/app/pipedv1/planner/terraform/pipeline.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package terraform

import (
	"fmt"
	"time"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner"
	"github.com/pipe-cd/pipecd/pkg/config"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// buildQuickSyncPipeline returns a single predefined TerraformSync stage,
// plus a hidden Rollback stage when autoRollback is enabled.
func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage {
	var (
		s, _ = planner.GetPredefinedStage(planner.PredefinedStageTerraformSync)
		out  = make([]*model.PipelineStage, 0, 2)
	)

	// Append SYNC stage.
	id := s.ID
	if id == "" {
		id = "stage-0"
	}
	stage := &model.PipelineStage{
		Id:         id,
		Name:       s.Name.String(),
		Desc:       s.Desc,
		Index:      0,
		Predefined: true,
		Visible:    true,
		Status:     model.StageStatus_STAGE_NOT_STARTED_YET,
		Metadata:   planner.MakeInitialStageMetadata(s),
		CreatedAt:  now.Unix(),
		UpdatedAt:  now.Unix(),
	}
	out = append(out, stage)

	// Append ROLLBACK stage if auto rollback is enabled.
	if autoRollback {
		s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback)
		out = append(out, &model.PipelineStage{
			Id:         s.ID,
			Name:       s.Name.String(),
			Desc:       s.Desc,
			Predefined: true,
			Visible:    false,
			Status:     model.StageStatus_STAGE_NOT_STARTED_YET,
			CreatedAt:  now.Unix(),
			UpdatedAt:  now.Unix(),
		})
	}

	return out
}

// buildProgressivePipeline converts the user-configured pipeline stages
// into model stages chained in order, appending a hidden Rollback stage
// when autoRollback is enabled.
func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage {
	var (
		preStageID = ""
		out        = make([]*model.PipelineStage, 0, len(pp.Stages))
	)

	for i, s := range pp.Stages {
		id := s.ID
		if id == "" {
			id = fmt.Sprintf("stage-%d", i)
		}
		stage := &model.PipelineStage{
			Id:         id,
			Name:       s.Name.String(),
			Desc:       s.Desc,
			Index:      int32(i),
			Predefined: false,
			Visible:    true,
			Status:     model.StageStatus_STAGE_NOT_STARTED_YET,
			Metadata:   planner.MakeInitialStageMetadata(s),
			CreatedAt:  now.Unix(),
			UpdatedAt:  now.Unix(),
		}
		// Chain each stage after the previous one.
		if preStageID != "" {
			stage.Requires = []string{preStageID}
		}
		preStageID = id
		out = append(out, stage)
	}

	if autoRollback {
		s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback)
		out = append(out, &model.PipelineStage{
			Id:         s.ID,
			Name:       s.Name.String(),
			Desc:       s.Desc,
			Predefined: true,
			Visible:    false,
			Status:     model.StageStatus_STAGE_NOT_STARTED_YET,
			CreatedAt:  now.Unix(),
			UpdatedAt:  now.Unix(),
		})
	}

	return out
}

// ---- pkg/app/pipedv1/planner/terraform/terraform.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package terraform

import (
	"context"
	"fmt"
	"io"
	"time"

	"go.uber.org/zap"

	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner"
	provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform"
	"github.com/pipe-cd/pipecd/pkg/model"
)

// Planner plans the deployment pipeline for terraform application.
type Planner struct {
}

// registerer is the consumer-side view of the planner registry.
type registerer interface {
	Register(k model.ApplicationKind, p planner.Planner) error
}

// Register registers this planner into the given registerer.
func Register(r registerer) {
	r.Register(model.ApplicationKind_TERRAFORM, &Planner{})
}

// Plan decides which pipeline should be used for the given input.
// Terraform has no per-file version concept, so Version is reported as
// "N/A" and only artifact versions found in the .tf files are collected.
func (p *Planner) Plan(ctx context.Context, in planner.Input) (out planner.Output, err error) {
	ds, err := in.TargetDSP.Get(ctx, io.Discard)
	if err != nil {
		err = fmt.Errorf("error while preparing deploy source data (%v)", err)
		return
	}

	cfg := ds.ApplicationConfig.TerraformApplicationSpec
	if cfg == nil {
		err = fmt.Errorf("missing TerraformApplicationSpec in application configuration")
		return
	}

	// In case the strategy has been decided by trigger.
	// For example: user triggered the deployment via web console.
	switch in.Trigger.SyncStrategy {
	case model.SyncStrategy_QUICK_SYNC:
		out.SyncStrategy = model.SyncStrategy_QUICK_SYNC
		out.Stages = buildQuickSyncPipeline(cfg.Input.AutoRollback, time.Now())
		out.Summary = in.Trigger.StrategySummary
		return
	case model.SyncStrategy_PIPELINE:
		if cfg.Pipeline == nil {
			err = fmt.Errorf("unable to force sync with pipeline because no pipeline was specified")
			return
		}
		out.SyncStrategy = model.SyncStrategy_PIPELINE
		out.Stages = buildProgressivePipeline(cfg.Pipeline, cfg.Input.AutoRollback, time.Now())
		out.Summary = in.Trigger.StrategySummary
		return
	}

	now := time.Now()
	out.Version = "N/A"

	files, err := provider.LoadTerraformFiles(ds.AppDir)
	if err != nil {
		return
	}

	// Failure to detect artifact versions is non-fatal: report "unknown".
	if versions, e := provider.FindArtifactVersions(files); e != nil || len(versions) == 0 {
		in.Logger.Warn("unable to determine target versions", zap.Error(e))
		out.Versions = []*model.ArtifactVersion{
			{
				Kind:    model.ArtifactVersion_UNKNOWN,
				Version: "unknown",
			},
		}
	} else {
		out.Versions = versions
	}

	// When no pipeline was configured, perform the quick sync.
	if cfg.Pipeline == nil || len(cfg.Pipeline.Stages) == 0 {
		out.SyncStrategy = model.SyncStrategy_QUICK_SYNC
		out.Stages = buildQuickSyncPipeline(cfg.Input.AutoRollback, now)
		out.Summary = "Quick sync by automatically applying any detected changes because no pipeline was configured"
		return
	}

	// Force to use pipeline when the alwaysUsePipeline field was configured.
	if cfg.Planner.AlwaysUsePipeline {
		out.SyncStrategy = model.SyncStrategy_PIPELINE
		out.Stages = buildProgressivePipeline(cfg.Pipeline, cfg.Input.AutoRollback, time.Now())
		out.Summary = "Sync with the specified pipeline (alwaysUsePipeline was set)"
		return
	}

	out.SyncStrategy = model.SyncStrategy_PIPELINE
	out.Stages = buildProgressivePipeline(cfg.Pipeline, cfg.Input.AutoRollback, now)
	out.Summary = "Sync with the specified progressive pipeline"
	return
}

// ---- pkg/app/pipedv1/planpreview/builder.go ----
// Copyright 2024 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package planpreview + +import ( + "bytes" + "context" + "fmt" + "os" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner/registry" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/trigger" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/backoff" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/model" + "github.com/pipe-cd/pipecd/pkg/regexpool" +) + +const ( + workspacePattern = "plan-preview-builder-*" + defaultWorkerAppNum = 3 + maxWorkerNum = 100 +) + +var ( + defaultPlannerRegistry = registry.DefaultRegistry() +) + +type lastTriggeredCommitGetter interface { + Get(ctx context.Context, applicationID string) (string, error) +} + +type Builder interface { + Build(ctx context.Context, id string, cmd model.Command_BuildPlanPreview) ([]*model.ApplicationPlanPreviewResult, error) +} + +type builder struct { + gitClient gitClient + apiClient apiClient + applicationLister applicationLister + commitGetter lastTriggeredCommitGetter + secretDecrypter secretDecrypter + appManifestsCache cache.Cache + regexPool *regexpool.Pool + pipedCfg *config.PipedSpec + logger *zap.Logger + + workingDir string + repoCfg config.PipedRepository +} + +func newBuilder( + gc gitClient, + ac apiClient, + al applicationLister, + cg lastTriggeredCommitGetter, + sd secretDecrypter, + amc cache.Cache, + rp *regexpool.Pool, + cfg *config.PipedSpec, + logger *zap.Logger, +) *builder { + + return &builder{ + gitClient: gc, + apiClient: ac, + applicationLister: al, + commitGetter: cg, + secretDecrypter: sd, + appManifestsCache: amc, + regexPool: rp, + pipedCfg: cfg, + logger: logger.Named("plan-preview-builder"), + } +} + +func (b *builder) Build(ctx 
context.Context, id string, cmd model.Command_BuildPlanPreview) (results []*model.ApplicationPlanPreviewResult, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("an unexpected panic occurred (%v)", r) + b.logger.Error("unexpected panic", zap.Error(err)) + } + }() + + return b.build(ctx, id, cmd) +} + +func (b *builder) build(ctx context.Context, id string, cmd model.Command_BuildPlanPreview) ([]*model.ApplicationPlanPreviewResult, error) { + logger := b.logger.With(zap.String("command", id)) + logger.Info(fmt.Sprintf("start building planpreview result for command %s", id)) + + // Ensure the existence of the working directory. + workingDir, err := os.MkdirTemp("", workspacePattern) + if err != nil { + return nil, fmt.Errorf("failed to create working directory (%w)", err) + } + defer os.RemoveAll(workingDir) + b.workingDir = workingDir + + // Find the registered repository in Piped config and validate the command's payload against it. + repoCfg, ok := b.pipedCfg.GetRepository(cmd.RepositoryId) + if !ok { + return nil, fmt.Errorf("repository %s was not found in Piped config", cmd.RepositoryId) + } + if repoCfg.Branch != cmd.BaseBranch { + return nil, fmt.Errorf("base branch of repository %s was not matched, requested %s, expected %s", cmd.RepositoryId, cmd.BaseBranch, repoCfg.Branch) + } + b.repoCfg = repoCfg + + // List all applications that belong to this Piped + // and are placed in the given repository. + apps := b.listApplications(repoCfg) + if len(apps) == 0 { + logger.Info(fmt.Sprintf("there is no target application for command %s", id)) + return nil, nil + } + + // Prepare source code at the head commit. + // This clones the base branch and merges the head branch into it for correct data. + // Because new changes might be added into the base branch after the head branch had checked out. 
+ repo, err := b.cloneHeadCommit(ctx, cmd.HeadBranch, cmd.HeadCommit) + if err != nil { + return nil, err + } + + // We added a merge commit so the commit ID was changed. + mergedCommit, err := repo.GetLatestCommit(ctx) + if err != nil { + return nil, err + } + + // Find all applications that should be triggered. + triggerApps, failedResults := b.findTriggerApps(ctx, repo, apps, mergedCommit.Hash) + results := failedResults + + if len(triggerApps) == 0 { + return results, nil + } + + // Plan the trigger applications for more detailed feedback. + var ( + numApps = len(triggerApps) + appCh = make(chan *model.Application, numApps) + resultCh = make(chan *model.ApplicationPlanPreviewResult, numApps) + ) + // Optimize the number of workers. + numWorkers := numApps / defaultWorkerAppNum + if numWorkers < 1 { + numWorkers = numApps + } + if numWorkers > maxWorkerNum { + numWorkers = maxWorkerNum + } + + // Start some workers to speed up building time. + logger.Info(fmt.Sprintf("start %d workers for building plan-preview results for %d applications", numWorkers, numApps)) + for w := 0; w < numWorkers; w++ { + go func(wid int) { + logger.Info("app worker for plan-preview started", zap.Int("worker", wid)) + for app := range appCh { + resultCh <- b.buildApp(ctx, wid, id, app, repo, mergedCommit.Hash) + } + logger.Info("app worker for plan-preview stopped", zap.Int("worker", wid)) + }(w) + } + + // Add all applications into the channel for start handling. + for i := 0; i < numApps; i++ { + appCh <- triggerApps[i] + } + close(appCh) + + // Wait and collect all results. 
+ for i := 0; i < numApps; i++ { + r := <-resultCh + results = append(results, r) + } + + logger.Info("successfully collected plan-preview results of all applications") + return results, nil +} + +func (b *builder) buildApp(ctx context.Context, worker int, command string, app *model.Application, repo git.Repo, mergedCommit string) *model.ApplicationPlanPreviewResult { + logger := b.logger.With( + zap.Int("worker", worker), + zap.String("command", command), + zap.String("app-id", app.Id), + zap.String("app-name", app.Name), + zap.String("app-kind", app.Kind.String()), + ) + + logger.Info("will decide sync strategy for an application") + + r := model.MakeApplicationPlanPreviewResult(*app) + + var preCommit string + // Find the commit of the last successful deployment. + if deploy, err := b.getMostRecentlySuccessfulDeployment(ctx, app.Id); err == nil { + preCommit = deploy.Trigger.Commit.Hash + } else if status.Code(err) != codes.NotFound { + r.Error = fmt.Sprintf("failed while finding the last successful deployment (%v)", err) + return r + } + + targetDSP := deploysource.NewProvider( + b.workingDir, + deploysource.NewLocalSourceCloner(repo, "target", mergedCommit), + *app.GitPath, + b.secretDecrypter, + ) + + strategy, err := b.plan(ctx, app, targetDSP, preCommit) + if err != nil { + r.Error = fmt.Sprintf("failed while planning, %v", err) + return r + } + r.SyncStrategy = strategy + + logger.Info("successfully decided sync strategy for a application", zap.String("strategy", strategy.String())) + + var buf bytes.Buffer + var dr *diffResult + + switch app.Kind { + case model.ApplicationKind_KUBERNETES: + dr, err = b.kubernetesDiff(ctx, app, targetDSP, preCommit, &buf) + case model.ApplicationKind_TERRAFORM: + dr, err = b.terraformDiff(ctx, app, targetDSP, &buf) + case model.ApplicationKind_CLOUDRUN: + dr, err = b.cloudrundiff(ctx, app, targetDSP, preCommit, &buf) + default: + // TODO: Calculating planpreview's diff for other application kinds. 
+ dr = &diffResult{ + summary: fmt.Sprintf("%s application is not implemented yet (coming soon)", app.Kind.String()), + } + } + + if dr != nil { + r.PlanSummary = []byte(dr.summary) + r.NoChange = dr.noChange + } + r.PlanDetails = buf.Bytes() + + if err != nil { + r.Error = fmt.Sprintf("failed while calculating diff, %v", err) + return r + } + + return r +} + +type diffResult struct { + summary string + noChange bool +} + +func (b *builder) cloneHeadCommit(ctx context.Context, headBranch, headCommit string) (git.Repo, error) { + dir, err := os.MkdirTemp(b.workingDir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory %w", err) + } + + var ( + remote = b.repoCfg.Remote + baseBranch = b.repoCfg.Branch + ) + repo, err := b.gitClient.Clone(ctx, b.repoCfg.RepoID, remote, baseBranch, dir) + if err != nil { + return nil, fmt.Errorf("failed to clone git repository %s at branch %s", b.repoCfg.RepoID, baseBranch) + } + + mergeCommitMessage := fmt.Sprintf("Plan-preview: merged %s commit from %s branch into %s base branch", headCommit, headBranch, baseBranch) + if err := repo.MergeRemoteBranch(ctx, headBranch, headCommit, mergeCommitMessage); err != nil { + return nil, fmt.Errorf("detected conflicts between commit %s at %s branch and the base branch %s (%w)", headCommit, headBranch, baseBranch, err) + } + + return repo, nil +} + +func (b *builder) findTriggerApps(ctx context.Context, repo git.Repo, apps []*model.Application, headCommit string) (triggerApps []*model.Application, failedResults []*model.ApplicationPlanPreviewResult) { + d := trigger.NewOnCommitDeterminer(repo, headCommit, b.commitGetter, b.logger) + determine := func(app *model.Application) (bool, error) { + appCfg, err := config.LoadApplication(repo.GetPath(), app.GitPath.GetApplicationConfigFilePath(), app.Kind) + if err != nil { + return false, err + } + return d.ShouldTrigger(ctx, app, appCfg) + } + + for _, app := range apps { + shouldTrigger, err := determine(app) + if 
shouldTrigger { + triggerApps = append(triggerApps, app) + continue + } + if err == nil { + continue + } + + r := model.MakeApplicationPlanPreviewResult(*app) + r.Error = fmt.Sprintf("failed while determining the application should be triggered or not, %v", err) + failedResults = append(failedResults, r) + } + return +} + +func (b *builder) plan(ctx context.Context, app *model.Application, targetDSP deploysource.Provider, lastSuccessfulCommit string) (strategy model.SyncStrategy, err error) { + p, ok := defaultPlannerRegistry.Planner(app.Kind) + if !ok { + err = fmt.Errorf("application kind %s is not supported yet", app.Kind.String()) + return + } + + in := planner.Input{ + ApplicationID: app.Id, + ApplicationName: app.Name, + GitPath: *app.GitPath, + Trigger: model.DeploymentTrigger{ + Commit: &model.Commit{ + Branch: b.repoCfg.Branch, + Hash: targetDSP.Revision(), + }, + Commander: "pipectl", + }, + TargetDSP: targetDSP, + MostRecentSuccessfulCommitHash: lastSuccessfulCommit, + PipedConfig: b.pipedCfg, + AppManifestsCache: b.appManifestsCache, + RegexPool: b.regexPool, + Logger: b.logger, + } + + if lastSuccessfulCommit != "" { + in.RunningDSP = deploysource.NewProvider( + b.workingDir, + deploysource.NewGitSourceCloner(b.gitClient, b.repoCfg, "running", lastSuccessfulCommit), + *app.GitPath, + b.secretDecrypter, + ) + } + + out, err := p.Plan(ctx, in) + if err != nil { + return + } + + strategy = out.SyncStrategy + return +} + +func (b *builder) listApplications(repo config.PipedRepository) []*model.Application { + apps := b.applicationLister.List() + out := make([]*model.Application, 0, len(apps)) + + for _, app := range apps { + if app.GitPath.Repo.Id != repo.RepoID { + continue + } + if app.GitPath.Repo.Remote != repo.Remote { + continue + } + if app.GitPath.Repo.Branch != repo.Branch { + continue + } + out = append(out, app) + } + + return out +} + +func (b *builder) getMostRecentlySuccessfulDeployment(ctx context.Context, applicationID string) 
(*model.ApplicationDeploymentReference, error) { + retry := pipedservice.NewRetry(3) + + deploy, err := retry.Do(ctx, func() (interface{}, error) { + resp, err := b.apiClient.GetApplicationMostRecentDeployment(ctx, &pipedservice.GetApplicationMostRecentDeploymentRequest{ + ApplicationId: applicationID, + Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS, + }) + if err != nil { + return nil, backoff.NewError(err, pipedservice.Retriable(err)) + } + return resp.Deployment, nil + }) + if err != nil { + return nil, err + } + + return deploy.(*model.ApplicationDeploymentReference), nil +} diff --git a/pkg/app/pipedv1/planpreview/cloudrundiff.go b/pkg/app/pipedv1/planpreview/cloudrundiff.go new file mode 100644 index 0000000000..1497697a7d --- /dev/null +++ b/pkg/app/pipedv1/planpreview/cloudrundiff.go @@ -0,0 +1,125 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package planpreview + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/cloudrun" + "github.com/pipe-cd/pipecd/pkg/diff" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func (b *builder) cloudrundiff( + ctx context.Context, + app *model.Application, + targetDSP deploysource.Provider, + lastCommit string, + buf *bytes.Buffer, +) (*diffResult, error) { + var ( + oldManifest, newManifest provider.ServiceManifest + err error + ) + + newManifest, err = b.loadCloudRunManifest(ctx, *app, targetDSP) + if err != nil { + fmt.Fprintf(buf, "failed to load cloud run manifest at the head commit (%v)\n", err) + return nil, err + } + + if lastCommit == "" { + fmt.Fprintf(buf, "failed to find the commit of the last successful deployment") + return nil, fmt.Errorf("cannot get the old manifest without the last successful deployment") + } + + runningDSP := deploysource.NewProvider( + b.workingDir, + deploysource.NewGitSourceCloner(b.gitClient, b.repoCfg, "running", lastCommit), + *app.GitPath, + b.secretDecrypter, + ) + oldManifest, err = b.loadCloudRunManifest(ctx, *app, runningDSP) + if err != nil { + fmt.Fprintf(buf, "failed to load cloud run manifest at the running commit (%v)\n", err) + return nil, err + } + + result, err := provider.Diff( + oldManifest, + newManifest, + diff.WithEquateEmpty(), + diff.WithCompareNumberAndNumericString(), + ) + if err != nil { + fmt.Fprintf(buf, "failed to compare manifests (%v)\n", err) + return nil, err + } + + summary := fmt.Sprintf("%d changes were detected", len(result.Diff.Nodes())) + if result.NoChange() { + fmt.Fprintln(buf, "No changes were detected") + return &diffResult{ + summary: "No changes were detected", + noChange: true, + }, nil + } + + details := result.Render(provider.DiffRenderOptions{ + UseDiffCommand: true, + }) + fmt.Fprintf(buf, "--- Last Deploy\n+++ Head Commit\n\n%s\n", details) + + return 
&diffResult{ + summary: summary, + }, nil + +} + +func (b *builder) loadCloudRunManifest(ctx context.Context, app model.Application, dsp deploysource.Provider) (provider.ServiceManifest, error) { + commit := dsp.Revision() + cache := provider.ServiceManifestCache{ + AppID: app.Id, + Cache: b.appManifestsCache, + Logger: b.logger, + } + + manifest, ok := cache.Get(commit) + if ok { + return manifest, nil + } + + ds, err := dsp.Get(ctx, io.Discard) + if err != nil { + return provider.ServiceManifest{}, err + } + + appCfg := ds.ApplicationConfig.CloudRunApplicationSpec + if appCfg == nil { + return provider.ServiceManifest{}, fmt.Errorf("malformed application configuration file") + } + + manifest, err = provider.LoadServiceManifest(ds.AppDir, appCfg.Input.ServiceManifestFile) + if err != nil { + return provider.ServiceManifest{}, err + } + + cache.Put(commit, manifest) + return manifest, nil +} diff --git a/pkg/app/pipedv1/planpreview/handler.go b/pkg/app/pipedv1/planpreview/handler.go new file mode 100644 index 0000000000..fd9cac24e0 --- /dev/null +++ b/pkg/app/pipedv1/planpreview/handler.go @@ -0,0 +1,298 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package planpreview + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" + + metrics "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planpreview/planpreviewmetrics" + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/model" + "github.com/pipe-cd/pipecd/pkg/regexpool" +) + +const ( + defaultWorkerNum = 3 + defaultCommandQueueBufferSize = 10 + defaultCommandCheckInterval = 5 * time.Second + defaultCommandHandleTimeout = 5 * time.Minute +) + +type options struct { + workerNum int + commandQueueBufferSize int + commandCheckInterval time.Duration + commandHandleTimeout time.Duration + logger *zap.Logger +} + +type Option func(*options) + +func WithWorkerNum(n int) Option { + return func(opts *options) { + opts.workerNum = n + } +} + +func WithCommandQueueBufferSize(s int) Option { + return func(opts *options) { + opts.commandQueueBufferSize = s + } +} + +func WithCommandCheckInterval(i time.Duration) Option { + return func(opts *options) { + opts.commandCheckInterval = i + } +} + +func WithCommandHandleTimeout(t time.Duration) Option { + return func(opts *options) { + opts.commandHandleTimeout = t + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type gitClient interface { + Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) +} + +type apiClient interface { + GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error) +} + +type applicationLister interface { + List() []*model.Application +} + +type commandLister interface { + ListBuildPlanPreviewCommands() []model.ReportableCommand +} + +type secretDecrypter 
interface { + Decrypt(string) (string, error) +} + +type Handler struct { + gitClient gitClient + commandLister commandLister + + commandCh chan model.ReportableCommand + prevCommands map[string]struct{} + + options *options + builderFactory func() Builder + logger *zap.Logger +} + +func NewHandler( + gc gitClient, + ac apiClient, + cl commandLister, + al applicationLister, + cg lastTriggeredCommitGetter, + sd secretDecrypter, + appManifestsCache cache.Cache, + cfg *config.PipedSpec, + opts ...Option, +) *Handler { + + opt := &options{ + workerNum: defaultWorkerNum, + commandQueueBufferSize: defaultCommandQueueBufferSize, + commandCheckInterval: defaultCommandCheckInterval, + commandHandleTimeout: defaultCommandHandleTimeout, + logger: zap.NewNop(), + } + for _, o := range opts { + o(opt) + } + + h := &Handler{ + gitClient: gc, + commandLister: cl, + commandCh: make(chan model.ReportableCommand, opt.commandQueueBufferSize), + prevCommands: map[string]struct{}{}, + options: opt, + logger: opt.logger.Named("plan-preview-handler"), + } + + regexPool := regexpool.DefaultPool() + h.builderFactory = func() Builder { + return newBuilder(gc, ac, al, cg, sd, appManifestsCache, regexPool, cfg, h.logger) + } + + return h +} + +// Run starts running Handler until the given context has done. 
+func (h *Handler) Run(ctx context.Context) error { + h.logger.Info("start running planpreview handler") + + startWorker := func(ctx context.Context, cmdCh <-chan model.ReportableCommand) { + h.logger.Info("started a worker for handling plan-preview command") + for { + select { + case cmd := <-cmdCh: + h.handleCommand(ctx, cmd) + + case <-ctx.Done(): + h.logger.Info("a worker has been stopped") + return + } + } + } + + h.logger.Info(fmt.Sprintf("spawn %d worker to handle commands", h.options.workerNum)) + for i := 0; i < h.options.workerNum; i++ { + go startWorker(ctx, h.commandCh) + } + + commandTicker := time.NewTicker(h.options.commandCheckInterval) + defer commandTicker.Stop() + + for { + select { + case <-ctx.Done(): + h.logger.Info("planpreview handler has been stopped") + return nil + + case <-commandTicker.C: + h.enqueueNewCommands(ctx) + } + } +} + +func (h *Handler) enqueueNewCommands(ctx context.Context) { + h.logger.Debug("fetching unhandled commands to enqueue") + + commands := h.commandLister.ListBuildPlanPreviewCommands() + if len(commands) == 0 { + h.logger.Debug("there is no command to enqueue") + return + } + + news := make([]model.ReportableCommand, 0, len(commands)) + cmds := make(map[string]struct{}, len(commands)) + for _, cmd := range commands { + cmds[cmd.Id] = struct{}{} + if _, ok := h.prevCommands[cmd.Id]; !ok { + news = append(news, cmd) + } + } + + h.logger.Info("fetched unhandled commands to enqueue", + zap.Any("pre-commands", h.prevCommands), + zap.Any("commands", cmds), + zap.Int("news", len(news)), + ) + + if len(news) == 0 { + h.logger.Info("there is no new command to enqueue") + return + } + + h.prevCommands = cmds + metrics.ReceivedCommands(len(news)) + h.logger.Info(fmt.Sprintf("will enqueue %d new commands", len(news))) + + for _, cmd := range news { + select { + case h.commandCh <- cmd: + h.logger.Info("queued a new new command", zap.String("command", cmd.Id)) + + case <-ctx.Done(): + return + } + } +} + +func (h *Handler) 
handleCommand(ctx context.Context, cmd model.ReportableCommand) { + start := time.Now() + logger := h.logger.With( + zap.String("command", cmd.Id), + ) + logger.Info("received a plan-preview command to handle") + + result := &model.PlanPreviewCommandResult{ + CommandId: cmd.Id, + PipedId: cmd.PipedId, + } + + reportError := func(err error) { + metrics.HandledCommand(metrics.StatusFailure, time.Since(start)) + + result.Error = err.Error() + output, err := json.Marshal(result) + if err != nil { + logger.Error("failed to marshal command result", zap.Error(err)) + } + + if err := cmd.Report(ctx, model.CommandStatus_COMMAND_FAILED, nil, output); err != nil { + logger.Error("failed to report command status", zap.Error(err)) + return + } + logger.Info("successfully reported a failure command") + } + + if cmd.BuildPlanPreview == nil { + reportError(fmt.Errorf("malformed command")) + return + } + + timeout := time.Duration(cmd.BuildPlanPreview.Timeout) * time.Second + if timeout == 0 { + timeout = h.options.commandHandleTimeout + } + buildCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + b := h.builderFactory() + appResults, err := b.Build(buildCtx, cmd.Id, *cmd.BuildPlanPreview) + if err != nil { + reportError(err) + return + } + + result.Results = appResults + output, err := json.Marshal(result) + if err != nil { + reportError(fmt.Errorf("failed to marshal command result (%w)", err)) + return + } + + if err := cmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, output); err != nil { + metrics.HandledCommand(metrics.StatusFailure, time.Since(start)) + logger.Error("failed to report command status", zap.Error(err)) + return + } + + metrics.HandledCommand(metrics.StatusSuccess, time.Since(start)) + logger.Info("successfully reported a success command") +} diff --git a/pkg/app/pipedv1/planpreview/handler_test.go b/pkg/app/pipedv1/planpreview/handler_test.go new file mode 100644 index 0000000000..e9b28c72e9 --- /dev/null +++ 
b/pkg/app/pipedv1/planpreview/handler_test.go @@ -0,0 +1,132 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package planpreview + +import ( + "context" + "sort" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +type testCommandLister struct { + commands []model.Command +} + +func (l *testCommandLister) ListBuildPlanPreviewCommands() []model.ReportableCommand { + out := make([]model.ReportableCommand, 0, len(l.commands)) + for i := range l.commands { + out = append(out, model.ReportableCommand{ + Command: &l.commands[i], + Report: func(ctx context.Context, status model.CommandStatus, metadata map[string]string, output []byte) error { + return nil + }, + }) + } + return out +} + +type testBuilder struct { + recorder func(id string) +} + +func (b *testBuilder) Build(ctx context.Context, id string, cmd model.Command_BuildPlanPreview) ([]*model.ApplicationPlanPreviewResult, error) { + b.recorder(id) + return nil, nil +} + +func TestHandler(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cl := &testCommandLister{} + handledCommands := make([]string, 0) + var mu sync.Mutex + var wg sync.WaitGroup + + handler := NewHandler(nil, nil, cl, nil, nil, nil, nil, nil, + WithWorkerNum(2), + // Use a long interval because we will directly call enqueueNewCommands function in this test. 
+ WithCommandCheckInterval(time.Hour), + ) + handler.builderFactory = func() Builder { + return &testBuilder{ + recorder: func(id string) { + defer wg.Done() + mu.Lock() + defer mu.Unlock() + handledCommands = append(handledCommands, id) + sort.Strings(handledCommands) + }, + } + } + go handler.Run(ctx) + + // CommandLister returns no command, + // then there is no new command. + handler.enqueueNewCommands(ctx) + + require.Equal(t, []string{}, handledCommands) + + // CommandLister returns 2 commands: 1, 2. + // both of them will be considered as new commands. + wg.Add(2) + cl.commands = []model.Command{ + { + Id: "1", + Type: model.Command_BUILD_PLAN_PREVIEW, + BuildPlanPreview: &model.Command_BuildPlanPreview{}, + }, + { + Id: "2", + Type: model.Command_BUILD_PLAN_PREVIEW, + BuildPlanPreview: &model.Command_BuildPlanPreview{}, + }, + } + handler.enqueueNewCommands(ctx) + wg.Wait() + require.Equal(t, []string{"1", "2"}, handledCommands) + + // CommandLister returns the same command list + // so no new command will be added. + handler.enqueueNewCommands(ctx) + require.Equal(t, []string{"1", "2"}, handledCommands) + + // CommandLister returns commands: 2, 3. + // then 3 will be considered as a new command. + wg.Add(1) + cl.commands = []model.Command{ + { + Id: "2", + Type: model.Command_BUILD_PLAN_PREVIEW, + BuildPlanPreview: &model.Command_BuildPlanPreview{}, + }, + { + Id: "3", + Type: model.Command_BUILD_PLAN_PREVIEW, + BuildPlanPreview: &model.Command_BuildPlanPreview{}, + }, + } + handler.enqueueNewCommands(ctx) + wg.Wait() + require.Equal(t, []string{"1", "2", "3"}, handledCommands) +} diff --git a/pkg/app/pipedv1/planpreview/kubernetesdiff.go b/pkg/app/pipedv1/planpreview/kubernetesdiff.go new file mode 100644 index 0000000000..656f60fd8e --- /dev/null +++ b/pkg/app/pipedv1/planpreview/kubernetesdiff.go @@ -0,0 +1,135 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package planpreview + +import ( + "bytes" + "context" + "fmt" + "io" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + provider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" + "github.com/pipe-cd/pipecd/pkg/cache" + "github.com/pipe-cd/pipecd/pkg/diff" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func (b *builder) kubernetesDiff( + ctx context.Context, + app *model.Application, + targetDSP deploysource.Provider, + lastSuccessfulCommit string, + buf *bytes.Buffer, +) (*diffResult, error) { + + var oldManifests, newManifests []provider.Manifest + var err error + + newManifests, err = loadKubernetesManifests(ctx, *app, targetDSP, b.appManifestsCache, b.gitClient, b.logger) + if err != nil { + fmt.Fprintf(buf, "failed to load kubernetes manifests at the head commit (%v)\n", err) + return nil, err + } + + if lastSuccessfulCommit != "" { + runningDSP := deploysource.NewProvider( + b.workingDir, + deploysource.NewGitSourceCloner(b.gitClient, b.repoCfg, "running", lastSuccessfulCommit), + *app.GitPath, + b.secretDecrypter, + ) + oldManifests, err = loadKubernetesManifests(ctx, *app, runningDSP, b.appManifestsCache, b.gitClient, b.logger) + if err != nil { + fmt.Fprintf(buf, "failed to load kubernetes manifests at the running commit (%v)\n", err) + return nil, err + } + } + + result, err := provider.DiffList( + oldManifests, + newManifests, + b.logger, + 
diff.WithEquateEmpty(), + diff.WithCompareNumberAndNumericString(), + ) + if err != nil { + fmt.Fprintf(buf, "failed to compare manifests (%v)\n", err) + return nil, err + } + + if result.NoChange() { + fmt.Fprintln(buf, "No changes were detected") + return &diffResult{ + summary: "No changes were detected", + noChange: true, + }, nil + } + + summary := fmt.Sprintf("%d added manifests, %d changed manifests, %d deleted manifests", len(result.Adds), len(result.Changes), len(result.Deletes)) + details := result.Render(provider.DiffRenderOptions{ + MaskSecret: true, + UseDiffCommand: true, + }) + fmt.Fprintf(buf, "--- Last Deploy\n+++ Head Commit\n\n%s\n", details) + + return &diffResult{ + summary: summary, + }, nil +} + +func loadKubernetesManifests(ctx context.Context, app model.Application, dsp deploysource.Provider, manifestsCache cache.Cache, gc gitClient, logger *zap.Logger) (manifests []provider.Manifest, err error) { + commit := dsp.Revision() + cache := provider.AppManifestsCache{ + AppID: app.Id, + Cache: manifestsCache, + Logger: logger, + } + + manifests, ok := cache.Get(commit) + if ok { + return manifests, nil + } + + // When the manifests were not in the cache we have to load them. 
+ ds, err := dsp.Get(ctx, io.Discard) + if err != nil { + return nil, err + } + + appCfg := ds.ApplicationConfig.KubernetesApplicationSpec + if appCfg == nil { + return nil, fmt.Errorf("malformed application configuration file") + } + + loader := provider.NewLoader( + app.Name, + ds.AppDir, + ds.RepoDir, + app.GitPath.ConfigFilename, + appCfg.Input, + gc, + logger, + ) + manifests, err = loader.LoadManifests(ctx) + if err != nil { + return nil, err + } + + cache.Put(commit, manifests) + return manifests, nil +} diff --git a/pkg/app/pipedv1/planpreview/planpreviewmetrics/metrics.go b/pkg/app/pipedv1/planpreview/planpreviewmetrics/metrics.go new file mode 100644 index 0000000000..cec25c5cd5 --- /dev/null +++ b/pkg/app/pipedv1/planpreview/planpreviewmetrics/metrics.go @@ -0,0 +1,79 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package planpreviewmetrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + statusKey = "status" +) + +type Status string + +const ( + StatusSuccess Status = "success" + StatusFailure Status = "failure" +) + +var ( + commandReceivedTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "plan_preview_command_received_total", + Help: "Total number of plan-preview commands received at piped.", + }, + ) + commandHandledTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "plan_preview_command_handled_total", + Help: "Total number of plan-preview commands handled at piped.", + }, + []string{statusKey}, + ) + + commandHandlingSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "plan_preview_command_handling_seconds", + Help: "Histogram of handling seconds of plan-preview commands.", + Buckets: []float64{1, 10, 30, 60, 120, 300, 600}, + }, + []string{statusKey}, + ) +) + +func ReceivedCommands(n int) { + commandReceivedTotal.Add(float64(n)) +} + +func HandledCommand(s Status, d time.Duration) { + commandHandledTotal.With(prometheus.Labels{ + statusKey: string(s), + }).Inc() + + commandHandlingSeconds.With(prometheus.Labels{ + statusKey: string(s), + }).Observe(d.Seconds()) +} + +func Register(r prometheus.Registerer) { + r.MustRegister( + commandReceivedTotal, + commandHandledTotal, + commandHandlingSeconds, + ) +} diff --git a/pkg/app/pipedv1/planpreview/terraformdiff.go b/pkg/app/pipedv1/planpreview/terraformdiff.go new file mode 100644 index 0000000000..11aa7007ca --- /dev/null +++ b/pkg/app/pipedv1/planpreview/terraformdiff.go @@ -0,0 +1,119 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package planpreview + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" + terraformprovider "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/terraform" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" + "github.com/pipe-cd/pipecd/pkg/model" +) + +func (b *builder) terraformDiff( + ctx context.Context, + app *model.Application, + targetDSP deploysource.Provider, + buf *bytes.Buffer, +) (*diffResult, error) { + + cp, ok := b.pipedCfg.FindPlatformProvider(app.PlatformProvider, model.ApplicationKind_TERRAFORM) + if !ok { + err := fmt.Errorf("platform provider %s was not found in Piped config", app.PlatformProvider) + fmt.Fprintln(buf, err.Error()) + return nil, err + } + cpCfg := cp.TerraformConfig + + ds, err := targetDSP.Get(ctx, io.Discard) + if err != nil { + fmt.Fprintf(buf, "failed to prepare deploy source data at the head commit (%v)\n", err) + return nil, err + } + + appCfg := ds.ApplicationConfig.TerraformApplicationSpec + if appCfg == nil { + err := fmt.Errorf("missing Terraform spec field in application configuration") + fmt.Fprintln(buf, err.Error()) + return nil, err + } + + version := appCfg.Input.TerraformVersion + terraformPath, installed, err := toolregistry.DefaultRegistry().Terraform(ctx, version) + if err != nil { + fmt.Fprintf(buf, "unable to find the specified terraform version %q (%v)\n", version, err) + return nil, err + } + if installed { + b.logger.Info(fmt.Sprintf("terraform %q has just been installed to %q because of no pre-installed binary for 
that version", version, terraformPath)) + } + + vars := make([]string, 0, len(cpCfg.Vars)+len(appCfg.Input.Vars)) + vars = append(vars, cpCfg.Vars...) + vars = append(vars, appCfg.Input.Vars...) + flags := appCfg.Input.CommandFlags + envs := appCfg.Input.CommandEnvs + + executor := terraformprovider.NewTerraform( + terraformPath, + ds.AppDir, + terraformprovider.WithoutColor(), + terraformprovider.WithVars(vars), + terraformprovider.WithVarFiles(appCfg.Input.VarFiles), + terraformprovider.WithAdditionalFlags(flags.Shared, flags.Init, flags.Plan, flags.Apply), + terraformprovider.WithAdditionalEnvs(envs.Shared, envs.Init, envs.Plan, envs.Apply), + ) + + if err := executor.Init(ctx, buf); err != nil { + fmt.Fprintf(buf, "failed while executing terraform init (%v)\n", err) + return nil, err + } + + if ws := appCfg.Input.Workspace; ws != "" { + if err := executor.SelectWorkspace(ctx, ws); err != nil { + fmt.Fprintf(buf, "failed to select workspace %q (%v). You might need to create the workspace before using by command %q\n", + ws, + err, + "terraform workspace new "+ws, + ) + return nil, err + } + fmt.Fprintf(buf, "selected workspace %q\n", ws) + } + + result, err := executor.Plan(ctx, buf) + if err != nil { + fmt.Fprintf(buf, "failed while executing terraform plan (%v)\n", err) + return nil, err + } + + if result.NoChanges() { + fmt.Fprintln(buf, "No changes were detected") + return &diffResult{ + summary: "No changes were detected", + noChange: true, + }, nil + } + + summary := fmt.Sprintf("%d to import, %d to add, %d to change, %d to destroy", result.Imports, result.Adds, result.Changes, result.Destroys) + fmt.Fprintln(buf, summary) + return &diffResult{ + summary: summary, + }, nil +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/cache.go b/pkg/app/pipedv1/platformprovider/cloudrun/cache.go new file mode 100644 index 0000000000..b4f6ae909e --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/cache.go @@ -0,0 +1,68 @@ +// Copyright 2024 The PipeCD 
Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "errors" + "fmt" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/cache" +) + +type ServiceManifestCache struct { + AppID string + Cache cache.Cache + Logger *zap.Logger +} + +func (c ServiceManifestCache) Get(commit string) (ServiceManifest, bool) { + key := serviceManifestCacheKey(c.AppID, commit) + item, err := c.Cache.Get(key) + if err == nil { + return item.(ServiceManifest), true + } + + if errors.Is(err, cache.ErrNotFound) { + c.Logger.Info("service manifest were not found in cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + ) + return ServiceManifest{}, false + } + + c.Logger.Error("failed while retrieving service manifest from cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + zap.Error(err), + ) + return ServiceManifest{}, false +} + +func (c ServiceManifestCache) Put(commit string, sm ServiceManifest) { + key := serviceManifestCacheKey(c.AppID, commit) + if err := c.Cache.Put(key, sm); err != nil { + c.Logger.Error("failed while putting service manifest from cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + zap.Error(err), + ) + } +} + +func serviceManifestCacheKey(appID, commit string) string { + return fmt.Sprintf("%s/%s", appID, commit) +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/client.go b/pkg/app/pipedv1/platformprovider/cloudrun/client.go 
new file mode 100644 index 0000000000..9d8f8ae74f --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/client.go @@ -0,0 +1,208 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "fmt" + "net/http" + "os" + + "go.uber.org/zap" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/api/run/v1" +) + +type client struct { + projectID string + region string + client *run.APIService + logger *zap.Logger +} + +func newClient(ctx context.Context, projectID, region, credentialsFile string, logger *zap.Logger) (*client, error) { + c := &client{ + projectID: projectID, + region: region, + logger: logger.Named("cloudrun"), + } + + var options []option.ClientOption + if len(credentialsFile) > 0 { + data, err := os.ReadFile(credentialsFile) + if err != nil { + return nil, fmt.Errorf("unable to read credentials file (%w)", err) + } + options = append(options, option.WithCredentialsJSON(data)) + } + options = append(options, + option.WithEndpoint(fmt.Sprintf("https://%s-run.googleapis.com/", region)), + ) + + runClient, err := run.NewService(ctx, options...) 
+ if err != nil { + return nil, err + } + c.client = runClient + + return c, nil +} + +func (c *client) Create(ctx context.Context, sm ServiceManifest) (*Service, error) { + svcCfg, err := sm.RunService() + if err != nil { + return nil, err + } + + var ( + svc = run.NewNamespacesServicesService(c.client) + parent = makeCloudRunParent(c.projectID) + call = svc.Create(parent, svcCfg) + ) + call.Context(ctx) + + service, err := call.Do() + if err != nil { + if e, ok := err.(*googleapi.Error); ok { + return nil, fmt.Errorf("failed to create service: code=%d, message=%s, details=%s", e.Code, e.Message, e.Details) + } + return nil, err + } + return (*Service)(service), nil +} + +func (c *client) Update(ctx context.Context, sm ServiceManifest) (*Service, error) { + svcCfg, err := sm.RunService() + if err != nil { + return nil, err + } + + var ( + svc = run.NewNamespacesServicesService(c.client) + name = makeCloudRunServiceName(c.projectID, sm.Name) + call = svc.ReplaceService(name, svcCfg) + ) + call.Context(ctx) + + service, err := call.Do() + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrServiceNotFound + } + return nil, err + } + return (*Service)(service), nil +} + +func (c *client) List(ctx context.Context, options *ListOptions) ([]*Service, string, error) { + var ( + svc = run.NewNamespacesServicesService(c.client) + parent = makeCloudRunParent(c.projectID) + call = svc.List(parent) + ) + call.Context(ctx) + if options.Limit != 0 { + call.Limit(options.Limit) + } + if options.LabelSelector != "" { + call.LabelSelector(options.LabelSelector) + } + if options.Cursor != "" { + call.Continue(options.Cursor) + } + + resp, err := call.Do() + if err != nil { + return nil, "", err + } + var cursor string + if resp.Metadata != nil { + cursor = resp.Metadata.Continue + } + + svcs := make([]*Service, 0, len(resp.Items)) + for i := range resp.Items { + svc := (*Service)(resp.Items[i]) + svcs = append(svcs, svc) + } 
+ + return svcs, cursor, nil +} + +func (c *client) GetRevision(ctx context.Context, name string) (*Revision, error) { + var ( + svc = run.NewNamespacesRevisionsService(c.client) + id = makeCloudRunRevisionName(c.projectID, name) + call = svc.Get(id) + ) + call.Context(ctx) + + revision, err := call.Do() + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrRevisionNotFound + } + return nil, err + } + return (*Revision)(revision), nil +} + +func (c *client) ListRevisions(ctx context.Context, options *ListRevisionsOptions) ([]*Revision, string, error) { + var ( + rev = run.NewNamespacesRevisionsService(c.client) + parent = makeCloudRunParent(c.projectID) + call = rev.List(parent) + ) + call.Context(ctx) + if options.Limit != 0 { + call.Limit(options.Limit) + } + if options.LabelSelector != "" { + call.LabelSelector(options.LabelSelector) + } + if options.Cursor != "" { + call.Continue(options.Cursor) + } + + resp, err := call.Do() + if err != nil { + return nil, "", err + } + var cursor string + if resp.Metadata != nil { + cursor = resp.Metadata.Continue + } + + revs := make([]*Revision, 0, len(resp.Items)) + for i := range resp.Items { + rev := (*Revision)(resp.Items[i]) + revs = append(revs, rev) + } + + return revs, cursor, nil +} + +func makeCloudRunParent(projectID string) string { + return fmt.Sprintf("namespaces/%s", projectID) +} + +func makeCloudRunServiceName(projectID, serviceID string) string { + return fmt.Sprintf("namespaces/%s/services/%s", projectID, serviceID) +} + +func makeCloudRunRevisionName(projectID, revisionID string) string { + return fmt.Sprintf("namespaces/%s/revisions/%s", projectID, revisionID) +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/client_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/client_test.go new file mode 100644 index 0000000000..2f26a4b39c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/client_test.go @@ -0,0 +1,54 @@ +// Copyright 
2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMakeCloudRunParent(t *testing.T) { + t.Parallel() + + const projectID = "projectID" + got := makeCloudRunParent(projectID) + want := "namespaces/projectID" + assert.Equal(t, want, got) +} + +func TestMakeCloudRunServiceName(t *testing.T) { + t.Parallel() + + const ( + projectID = "projectID" + serviceID = "serviceID" + ) + got := makeCloudRunServiceName(projectID, serviceID) + want := "namespaces/projectID/services/serviceID" + assert.Equal(t, want, got) +} + +func TestMakeCloudRunRevisionName(t *testing.T) { + t.Parallel() + + const ( + projectID = "projectID" + revisionID = "revisionID" + ) + got := makeCloudRunRevisionName(projectID, revisionID) + want := "namespaces/projectID/revisions/revisionID" + assert.Equal(t, want, got) +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun.go b/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun.go new file mode 100644 index 0000000000..607a534e4f --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun.go @@ -0,0 +1,318 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + "sync" + + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + "google.golang.org/api/run/v1" + + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/model" +) + +const ( + DefaultServiceManifestFilename = "service.yaml" +) + +var ( + ErrServiceNotFound = errors.New("not found") + ErrRevisionNotFound = errors.New("not found") +) + +var ( + TypeConditions = map[string]struct{}{ + "Active": struct{}{}, + "Ready": struct{}{}, + "ConfigurationsReady": struct{}{}, + "RoutesReady": struct{}{}, + "ContainerHealthy": struct{}{}, + "ResourcesAvailable": struct{}{}, + } + TypeHealthyServiceConditions = map[string]struct{}{ + "Ready": struct{}{}, + "ConfigurationsReady": struct{}{}, + "RoutesReady": struct{}{}, + } + TypeHealthyRevisionConditions = map[string]struct{}{ + "Ready": struct{}{}, + "Active": struct{}{}, + "ContainerHealthy": struct{}{}, + "ResourcesAvailable": struct{}{}, + } +) + +// Kind represents the kind of resource. +type Kind string + +const ( + KindService Kind = "Service" + KindRevision Kind = "Revision" +) + +type ( + Service run.Service + Revision run.Revision + + StatusConditions struct { + Kind Kind + TrueTypes map[string]struct{} + + // Eliminate duplicated messages with the same reason. + FalseMessages []string + UnknownMessages []string + } +) + +const ( + LabelManagedBy = "pipecd-dev-managed-by" // Always be piped. + LabelPiped = "pipecd-dev-piped" // The id of piped handling this application. 
+ LabelApplication = "pipecd-dev-application" // The application this resource belongs to. + LabelCommitHash = "pipecd-dev-commit-hash" // Hash value of the deployed commit. + LabelRevisionName = "pipecd-dev-revision-name" // The name of revision. + ManagedByPiped = "piped" +) + +type Client interface { + Create(ctx context.Context, sm ServiceManifest) (*Service, error) + Update(ctx context.Context, sm ServiceManifest) (*Service, error) + List(ctx context.Context, options *ListOptions) ([]*Service, string, error) + GetRevision(ctx context.Context, name string) (*Revision, error) + ListRevisions(ctx context.Context, options *ListRevisionsOptions) ([]*Revision, string, error) +} + +type ListOptions struct { + Limit int64 + LabelSelector string + Cursor string +} + +type ListRevisionsOptions struct { + Limit int64 + LabelSelector string + Cursor string +} + +type Registry interface { + Client(ctx context.Context, name string, cfg *config.PlatformProviderCloudRunConfig, logger *zap.Logger) (Client, error) +} + +func LoadServiceManifest(appDir, serviceFilename string) (ServiceManifest, error) { + if serviceFilename == "" { + serviceFilename = DefaultServiceManifestFilename + } + path := filepath.Join(appDir, serviceFilename) + return loadServiceManifest(path) +} + +var defaultRegistry = ®istry{ + clients: make(map[string]Client), + newGroup: &singleflight.Group{}, +} + +func DefaultRegistry() Registry { + return defaultRegistry +} + +type registry struct { + clients map[string]Client + mu sync.RWMutex + newGroup *singleflight.Group +} + +func (r *registry) Client(ctx context.Context, name string, cfg *config.PlatformProviderCloudRunConfig, logger *zap.Logger) (Client, error) { + r.mu.RLock() + client, ok := r.clients[name] + r.mu.RUnlock() + if ok { + return client, nil + } + + c, err, _ := r.newGroup.Do(name, func() (interface{}, error) { + return newClient(ctx, cfg.Project, cfg.Region, cfg.CredentialsFile, logger) + }) + if err != nil { + return nil, err + } + + 
client = c.(Client) + r.mu.Lock() + r.clients[name] = client + r.mu.Unlock() + + return client, nil +} + +func MakeManagedByPipedSelector() string { + return fmt.Sprintf("%s=%s", LabelManagedBy, ManagedByPiped) +} + +func MakeRevisionNamesSelector(names []string) string { + return fmt.Sprintf("%s in (%s)", LabelRevisionName, strings.Join(names, ",")) +} + +func (s *Service) ServiceManifest() (ServiceManifest, error) { + r := (*run.Service)(s) + data, err := r.MarshalJSON() + if err != nil { + return ServiceManifest{}, err + } + return ParseServiceManifest(data) +} + +func (s *Service) UID() (string, bool) { + if s.Metadata == nil || s.Metadata.Uid == "" { + return "", false + } + return s.Metadata.Uid, true +} + +// ActiveRevisionNames returns all its active revisions which may handle the traffic. +func (s *Service) ActiveRevisionNames() []string { + if s.Status == nil { + return nil + } + tf := s.Status.Traffic + ret := make([]string, len(tf)) + for i := range tf { + ret[i] = tf[i].RevisionName + } + return ret +} + +func (s *Service) StatusConditions() *StatusConditions { + var ( + trueTypes = make(map[string]struct{}, len(TypeConditions)) + falseMsgs = make(map[string]string, len(TypeConditions)) + unknownMsgs = make(map[string]string, len(TypeConditions)) + ) + + if s.Status == nil { + return nil + } + for _, cond := range s.Status.Conditions { + if _, ok := TypeConditions[cond.Type]; !ok { + continue + } + switch cond.Status { + case "True": + trueTypes[cond.Type] = struct{}{} + case "False": + falseMsgs[cond.Reason] = cond.Message + default: + unknownMsgs[cond.Reason] = cond.Message + } + } + + fMsgs := make([]string, 0, len(falseMsgs)) + for _, v := range falseMsgs { + fMsgs = append(fMsgs, v) + } + + uMsgs := make([]string, 0, len(unknownMsgs)) + for _, v := range unknownMsgs { + uMsgs = append(uMsgs, v) + } + + return &StatusConditions{ + Kind: KindService, + TrueTypes: trueTypes, + FalseMessages: fMsgs, + UnknownMessages: uMsgs, + } +} + +func (r 
*Revision) RevisionManifest() (RevisionManifest, error) { + rev := (*run.Revision)(r) + data, err := rev.MarshalJSON() + if err != nil { + return RevisionManifest{}, err + } + return ParseRevisionManifest(data) +} + +func (r *Revision) StatusConditions() *StatusConditions { + var ( + trueTypes = make(map[string]struct{}, len(TypeConditions)) + falseMsgs = make(map[string]string, len(TypeConditions)) + unknownMsgs = make(map[string]string, len(TypeConditions)) + ) + + if r.Status == nil { + return nil + } + for _, cond := range r.Status.Conditions { + if _, ok := TypeConditions[cond.Type]; !ok { + continue + } + switch cond.Status { + case "True": + trueTypes[cond.Type] = struct{}{} + case "False": + falseMsgs[cond.Reason] = cond.Message + default: + unknownMsgs[cond.Reason] = cond.Message + } + } + + fMsgs := make([]string, 0, len(falseMsgs)) + for _, v := range falseMsgs { + fMsgs = append(fMsgs, v) + } + + uMsgs := make([]string, 0, len(unknownMsgs)) + for _, v := range unknownMsgs { + uMsgs = append(uMsgs, v) + } + + return &StatusConditions{ + Kind: KindRevision, + TrueTypes: trueTypes, + FalseMessages: fMsgs, + UnknownMessages: uMsgs, + } +} + +func (s *StatusConditions) HealthStatus() (model.CloudRunResourceState_HealthStatus, string) { + if s == nil { + return model.CloudRunResourceState_UNKNOWN, "Unexpected error while calculating: unable to find status" + } + + if len(s.FalseMessages) > 0 { + return model.CloudRunResourceState_OTHER, strings.Join(s.FalseMessages, "; ") + } + + if len(s.UnknownMessages) > 0 { + return model.CloudRunResourceState_UNKNOWN, strings.Join(s.UnknownMessages, "; ") + } + + mustPassConditions := TypeHealthyServiceConditions + if s.Kind == KindRevision { + mustPassConditions = TypeHealthyRevisionConditions + } + for k := range mustPassConditions { + if _, ok := s.TrueTypes[k]; !ok { + return model.CloudRunResourceState_UNKNOWN, fmt.Sprintf("Could not check status field %q", k) + } + } + return model.CloudRunResourceState_HEALTHY, "" 
+} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun_test.go new file mode 100644 index 0000000000..3afc28daa4 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/cloudrun_test.go @@ -0,0 +1,610 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestLoadServiceManifest(t *testing.T) { + t.Parallel() + + const ( + appDir = "testdata" + serviceFile = "new_manifest.yaml" + ) + // Success + got, err := LoadServiceManifest(appDir, serviceFile) + require.NoError(t, err) + assert.NotEmpty(t, got) + + // Failure + _, err = LoadServiceManifest(appDir, "") + assert.Error(t, err) +} + +func TestMakeManagedByPipedSelector(t *testing.T) { + t.Parallel() + + want := "pipecd-dev-managed-by=piped" + got := MakeManagedByPipedSelector() + assert.Equal(t, want, got) +} + +func TestMakeRevisionNamesSelector(t *testing.T) { + t.Parallel() + + names := []string{"test-1", "test-2", "test-3"} + got := MakeRevisionNamesSelector(names) + want := "pipecd-dev-revision-name in (test-1,test-2,test-3)" + assert.Equal(t, want, got) +} + +func TestService(t *testing.T) { + t.Parallel() + + sm, err := ParseServiceManifest([]byte(serviceManifest)) + require.NoError(t, err) + + svc, err := 
sm.RunService() + require.NoError(t, err) + + // ServiceManifest + s := (*Service)(svc) + got, err := s.ServiceManifest() + require.NoError(t, err) + assert.Equal(t, sm, got) + + // UID + id, ok := s.UID() + assert.True(t, ok) + assert.Equal(t, "service-uid", id) + + // ActiveRevisionNames + names := s.ActiveRevisionNames() + assert.Len(t, names, 1) +} + +func TestService_HealthStatus(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifest string + want model.CloudRunResourceState_HealthStatus + }{ + { + name: "healthy", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +status: + observedGeneration: 65 + conditions: + - type: Ready + status: 'True' + lastTransitionTime: '2021-09-15T06:56:22.222303Z' + - type: ConfigurationsReady + status: 'True' + lastTransitionTime: '2021-09-15T06:55:41.885793Z' + - type: RoutesReady + status: 'True' + lastTransitionTime: '2021-09-15T06:56:22.338031Z' + latestReadyRevisionName: helloworld-v010-1234567 + latestCreatedRevisionName: helloworld-v010-1234567 + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: model.CloudRunResourceState_HEALTHY, + }, + { + name: "unknown: unable to find status", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + labels: + 
cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: model.CloudRunResourceState_UNKNOWN, + }, + { + name: "unknown: unable to parse status", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +status: + observedGeneration: 65 + conditions: + - type: Ready + status: 'Unknown' + lastTransitionTime: '2021-09-15T06:56:22.222303Z' + - type: ConfigurationsReady + status: 'False' + lastTransitionTime: '2021-09-15T06:55:41.885793Z' + - type: RoutesReady + status: 'True' + lastTransitionTime: '2021-09-15T06:56:22.338031Z' + latestReadyRevisionName: helloworld-v010-1234567 + latestCreatedRevisionName: helloworld-v010-1234567 + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: model.CloudRunResourceState_OTHER, + }, + { + name: "unhealthy", + manifest: ` 
+apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +status: + observedGeneration: 65 + conditions: + - type: Ready + status: 'False' + lastTransitionTime: '2021-09-15T06:56:22.222303Z' + - type: ConfigurationsReady + status: 'Unknown' + lastTransitionTime: '2021-09-15T06:55:41.885793Z' + - type: RoutesReady + status: 'Unknown' + lastTransitionTime: '2021-09-15T06:56:22.338031Z' + latestReadyRevisionName: helloworld-v010-1234567 + latestCreatedRevisionName: helloworld-v010-1234567 + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: model.CloudRunResourceState_OTHER, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + data := []byte(tc.manifest) + sm, err := ParseServiceManifest(data) + require.NoError(t, err) + + svc, err := sm.RunService() + require.NoError(t, err) + + s := (*Service)(svc) + got, _ := s.StatusConditions().HealthStatus() + require.Equal(t, tc.want, got) + }) + } +} + +func TestRevision(t *testing.T) { + t.Parallel() + + rm, err := ParseRevisionManifest([]byte(revisionManifest)) + require.NoError(t, err) + require.NotEmpty(t, rm) + + rev, err := rm.RunRevision() + require.NoError(t, err) + + r := (*Revision)(rev) + got, err := r.RevisionManifest() + require.NoError(t, err) + assert.Equal(t, rm, got) +} + +func TestRevision_HealthStatus(t *testing.T) { + 
t.Parallel() + + testcases := []struct { + name string + manifest string + want model.CloudRunResourceState_HealthStatus + }{ + { + name: "healthy", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Revision +metadata: + name: helloworld-v010-1234567 + namespace: '0123456789' + selfLink: /apis/serving.knative.dev/v1/namespaces/0123456789/revisions/helloworld-v010-1234567 + uid: 0123-456-789-101112-13141516 + resourceVersion: AAAAAAA + generation: 1 + creationTimestamp: '2022-01-28T07:46:53.981805Z' + labels: + serving.knative.dev/route: helloworld + serving.knative.dev/configuration: helloworld + serving.knative.dev/configurationGeneration: '3' + serving.knative.dev/service: helloworld + serving.knative.dev/serviceUid: 0123-456-789-101112-13141516 + cloud.googleapis.com/location: asia-northeast1 + annotations: + serving.knative.dev/creator: example@foo.iam.gserviceaccount.com + autoscaling.knative.dev/maxScale: '1' + run.googleapis.com/cpu-throttling: 'true' + ownerReferences: + - kind: Configuration + name: helloworld + uid: 0123-456-789-101112-13141516 + apiVersion: serving.knative.dev/v1 + controller: true + blockOwnerDeletion: true +spec: + containerConcurrency: 80 + timeoutSeconds: 300 + serviceAccountName: example@foo.iam.gserviceaccount.com + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +status: + observedGeneration: 1 + conditions: + - type: Ready + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: Active + status: 'True' + lastTransitionTime: '2022-01-28T07:47:04.722527Z' + severity: Info + - type: ContainerHealthy + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: ResourcesAvailable + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.150114Z' + logUrl: https://console.cloud.google.com/logs + imageDigest: gcr.io/pipecd/helloworld@sha256:abcdefg +`, + want: 
model.CloudRunResourceState_HEALTHY, + }, + { + name: "unknown: unable to find status", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Revision +metadata: + name: helloworld-v010-1234567 + namespace: '0123456789' + selfLink: /apis/serving.knative.dev/v1/namespaces/0123456789/revisions/helloworld-v010-1234567 + uid: 0123-456-789-101112-13141516 + resourceVersion: AAAAAAA + generation: 1 + creationTimestamp: '2022-01-28T07:46:53.981805Z' + labels: + serving.knative.dev/route: helloworld + serving.knative.dev/configuration: helloworld + serving.knative.dev/configurationGeneration: '3' + serving.knative.dev/service: helloworld + serving.knative.dev/serviceUid: 0123-456-789-101112-13141516 + cloud.googleapis.com/location: asia-northeast1 + annotations: + serving.knative.dev/creator: example@foo.iam.gserviceaccount.com + autoscaling.knative.dev/maxScale: '1' + run.googleapis.com/cpu-throttling: 'true' + ownerReferences: + - kind: Configuration + name: helloworld + uid: 0123-456-789-101112-13141516 + apiVersion: serving.knative.dev/v1 + controller: true + blockOwnerDeletion: true +spec: + containerConcurrency: 80 + timeoutSeconds: 300 + serviceAccountName: example@foo.iam.gserviceaccount.com + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +`, + want: model.CloudRunResourceState_UNKNOWN, + }, + { + name: "unknown: unable to parse status", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Revision +metadata: + name: helloworld-v010-1234567 + namespace: '0123456789' + selfLink: /apis/serving.knative.dev/v1/namespaces/0123456789/revisions/helloworld-v010-1234567 + uid: 0123-456-789-101112-13141516 + resourceVersion: AAAAAAA + generation: 1 + creationTimestamp: '2022-01-28T07:46:53.981805Z' + labels: + serving.knative.dev/route: helloworld + serving.knative.dev/configuration: helloworld + serving.knative.dev/configurationGeneration: 
'3' + serving.knative.dev/service: helloworld + serving.knative.dev/serviceUid: 0123-456-789-101112-13141516 + cloud.googleapis.com/location: asia-northeast1 + annotations: + serving.knative.dev/creator: example@foo.iam.gserviceaccount.com + autoscaling.knative.dev/maxScale: '1' + run.googleapis.com/cpu-throttling: 'true' + ownerReferences: + - kind: Configuration + name: helloworld + uid: 0123-456-789-101112-13141516 + apiVersion: serving.knative.dev/v1 + controller: true + blockOwnerDeletion: true +spec: + containerConcurrency: 80 + timeoutSeconds: 300 + serviceAccountName: example@foo.iam.gserviceaccount.com + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +status: + observedGeneration: 1 + conditions: + - type: Ready + status: 'Unknown' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: Active + status: 'True' + lastTransitionTime: '2022-01-28T07:47:04.722527Z' + severity: Info + - type: ContainerHealthy + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: ResourcesAvailable + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.150114Z' + logUrl: https://console.cloud.google.com/logs + imageDigest: gcr.io/pipecd/helloworld@sha256:abcdefg +`, + want: model.CloudRunResourceState_UNKNOWN, + }, + { + name: "unhealthy", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Revision +metadata: + name: helloworld-v010-1234567 + namespace: '0123456789' + selfLink: /apis/serving.knative.dev/v1/namespaces/0123456789/revisions/helloworld-v010-1234567 + uid: 0123-456-789-101112-13141516 + resourceVersion: AAAAAAA + generation: 1 + creationTimestamp: '2022-01-28T07:46:53.981805Z' + labels: + serving.knative.dev/route: helloworld + serving.knative.dev/configuration: helloworld + serving.knative.dev/configurationGeneration: '3' + serving.knative.dev/service: helloworld + serving.knative.dev/serviceUid: 
0123-456-789-101112-13141516 + cloud.googleapis.com/location: asia-northeast1 + annotations: + serving.knative.dev/creator: example@foo.iam.gserviceaccount.com + autoscaling.knative.dev/maxScale: '1' + run.googleapis.com/cpu-throttling: 'true' + ownerReferences: + - kind: Configuration + name: helloworld + uid: 0123-456-789-101112-13141516 + apiVersion: serving.knative.dev/v1 + controller: true + blockOwnerDeletion: true +spec: + containerConcurrency: 80 + timeoutSeconds: 300 + serviceAccountName: example@foo.iam.gserviceaccount.com + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +status: + observedGeneration: 1 + conditions: + - type: Ready + status: 'False' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: Active + status: 'True' + lastTransitionTime: '2022-01-28T07:47:04.722527Z' + severity: Info + - type: ContainerHealthy + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: ResourcesAvailable + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.150114Z' + logUrl: https://console.cloud.google.com/logs + imageDigest: gcr.io/pipecd/helloworld@sha256:abcdefg +`, + want: model.CloudRunResourceState_OTHER, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + data := []byte(tc.manifest) + rm, err := ParseRevisionManifest(data) + require.NoError(t, err) + + rev, err := rm.RunRevision() + require.NoError(t, err) + + r := (*Revision)(rev) + got, _ := r.StatusConditions().HealthStatus() + require.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/diff.go b/pkg/app/pipedv1/platformprovider/cloudrun/diff.go new file mode 100644 index 0000000000..3ca3d1a0bb --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/diff.go @@ -0,0 +1,135 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/pipe-cd/pipecd/pkg/diff" +) + +const ( + diffCommand = "diff" +) + +type DiffResult struct { + Diff *diff.Result + Old ServiceManifest + New ServiceManifest +} + +func (d *DiffResult) NoChange() bool { + return len(d.Diff.Nodes()) == 0 +} + +func Diff(old, new ServiceManifest, opts ...diff.Option) (*DiffResult, error) { + // NOTE: This key may change when we support to ignore Cloud Run's drift detection. + key := old.Name + d, err := diff.DiffUnstructureds(*old.u, *new.u, key, opts...) + if err != nil { + return nil, err + } + if !d.HasDiff() { + return &DiffResult{Diff: d}, nil + } + ret := &DiffResult{ + Old: old, + New: new, + Diff: d, + } + return ret, nil +} + +type DiffRenderOptions struct { + // If true, use "diff" command to render. + UseDiffCommand bool +} + +func (d *DiffResult) Render(opt DiffRenderOptions) string { + var b strings.Builder + opts := []diff.RenderOption{ + diff.WithLeftPadding(1), + } + renderer := diff.NewRenderer(opts...) 
+ if !opt.UseDiffCommand { + b.WriteString(renderer.Render(d.Diff.Nodes())) + } else { + d, err := diffByCommand(diffCommand, d.Old, d.New) + if err != nil { + b.WriteString(fmt.Sprintf("An error occurred while rendering diff (%v)", err)) + } else { + b.Write(d) + } + } + b.WriteString("\n") + + return b.String() +} + +func diffByCommand(command string, old, new ServiceManifest) ([]byte, error) { + oldBytes, err := old.YamlBytes() + if err != nil { + return nil, err + } + + newBytes, err := new.YamlBytes() + if err != nil { + return nil, err + } + + oldFile, err := os.CreateTemp("", "old") + if err != nil { + return nil, err + } + defer os.Remove(oldFile.Name()) + if _, err := oldFile.Write(oldBytes); err != nil { + return nil, err + } + + newFile, err := os.CreateTemp("", "new") + if err != nil { + return nil, err + } + defer os.Remove(newFile.Name()) + if _, err := newFile.Write(newBytes); err != nil { + return nil, err + } + + var stdout, stderr bytes.Buffer + cmd := exec.Command(command, "-u", "-N", oldFile.Name(), newFile.Name()) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + if stdout.Len() > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + if err != nil { + return nil, fmt.Errorf("failed to run diff, err = %w, %s", err, stderr.String()) + } + + // Remove two-line header from output. + data := bytes.TrimSpace(stdout.Bytes()) + rows := bytes.SplitN(data, []byte("\n"), 3) + if len(rows) == 3 { + return rows[2], nil + } + return data, nil +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/diff_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/diff_test.go new file mode 100644 index 0000000000..0e91f0222b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/diff_test.go @@ -0,0 +1,165 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDiff(t *testing.T) { + t.Parallel() + + old, err := loadServiceManifest("testdata/old_manifest.yaml") + require.NoError(t, err) + require.NotEmpty(t, old) + + new, err := loadServiceManifest("testdata/new_manifest.yaml") + require.NoError(t, err) + require.NotEmpty(t, new) + + // Have diff. + got, err := Diff(old, new) + require.NoError(t, err) + require.NotEmpty(t, got) + + // Don't have diff. 
+ got, err = Diff(old, old) + require.NoError(t, err) + require.NotEmpty(t, got) +} + +func TestDiffResult_NoChange(t *testing.T) { + t.Parallel() + + old, err := loadServiceManifest("testdata/old_manifest.yaml") + require.NoError(t, err) + require.NotEmpty(t, old) + + new, err := loadServiceManifest("testdata/new_manifest.yaml") + require.NoError(t, err) + require.NotEmpty(t, new) + + result, err := Diff(old, new) + require.NoError(t, err) + + got := result.NoChange() + require.False(t, got) +} + +func TestDiffResult_Render(t *testing.T) { + old, err := loadServiceManifest("testdata/old_manifest.yaml") + require.NoError(t, err) + + new, err := loadServiceManifest("testdata/new_manifest.yaml") + require.NoError(t, err) + + result, err := Diff(old, new) + require.NoError(t, err) + + // Not use diff command + opt := DiffRenderOptions{} + got := result.Render(opt) + want := ` spec: + template: + spec: + containers: + - + #spec.template.spec.containers.0.image +- image: gcr.io/pipecd/helloworld:v0.6.0 ++ image: gcr.io/pipecd/helloworld:v0.5.0 + + +` + require.Equal(t, want, got) + + // Use diff command + opt = DiffRenderOptions{UseDiffCommand: true} + got = result.Render(opt) + want = `@@ -18,7 +18,7 @@ + containers: + - args: + - server +- image: gcr.io/pipecd/helloworld:v0.6.0 ++ image: gcr.io/pipecd/helloworld:v0.5.0 + ports: + - containerPort: 9085 + name: http1 +` + require.Equal(t, want, got) +} + +func TestDiffByCommand(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + command string + oldManifest string + newManifest string + expected string + expectedErr bool + }{ + { + name: "no command", + command: "non-existent-diff", + oldManifest: "testdata/old_manifest.yaml", + newManifest: "testdata/old_manifest.yaml", + expected: "", + expectedErr: true, + }, + { + name: "no diff", + command: diffCommand, + oldManifest: "testdata/old_manifest.yaml", + newManifest: "testdata/old_manifest.yaml", + expected: "", + }, + { + name: "has diff", + 
command: diffCommand, + oldManifest: "testdata/old_manifest.yaml", + newManifest: "testdata/new_manifest.yaml", + expected: `@@ -18,7 +18,7 @@ + containers: + - args: + - server +- image: gcr.io/pipecd/helloworld:v0.6.0 ++ image: gcr.io/pipecd/helloworld:v0.5.0 + ports: + - containerPort: 9085 + name: http1`, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + old, err := loadServiceManifest(tc.oldManifest) + require.NoError(t, err) + + new, err := loadServiceManifest(tc.newManifest) + require.NoError(t, err) + + got, err := diffByCommand(tc.command, old, new) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tc.expected, string(got)) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest.go b/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest.go new file mode 100644 index 0000000000..1f1f624ac2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest.go @@ -0,0 +1,55 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "google.golang.org/api/run/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" +) + +type RevisionManifest struct { + Name string + u *unstructured.Unstructured +} + +func ParseRevisionManifest(data []byte) (RevisionManifest, error) { + var obj unstructured.Unstructured + if err := yaml.Unmarshal(data, &obj); err != nil { + return RevisionManifest{}, err + } + + return RevisionManifest{ + Name: obj.GetName(), + u: &obj, + }, nil +} + +func (r RevisionManifest) YamlBytes() ([]byte, error) { + return yaml.Marshal(r.u) +} + +func (r RevisionManifest) RunRevision() (*run.Revision, error) { + data, err := r.YamlBytes() + if err != nil { + return nil, err + } + + var rev run.Revision + if err := yaml.Unmarshal(data, &rev); err != nil { + return nil, err + } + return &rev, nil +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest_test.go new file mode 100644 index 0000000000..9d3bb6b8e4 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/revisionmanifest_test.go @@ -0,0 +1,104 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const revisionManifest = ` +apiVersion: serving.knative.dev/v1 +kind: Revision +metadata: + name: helloworld-v010-1234567 + namespace: '0123456789' + selfLink: /apis/serving.knative.dev/v1/namespaces/0123456789/revisions/helloworld-v010-1234567 + uid: 0123-456-789-101112-13141516 + resourceVersion: AAAAAAA + generation: 1 + creationTimestamp: '2022-01-28T07:46:53.981805Z' + labels: + serving.knative.dev/route: helloworld + serving.knative.dev/configuration: helloworld + serving.knative.dev/configurationGeneration: '3' + serving.knative.dev/service: helloworld + serving.knative.dev/serviceUid: 0123-456-789-101112-13141516 + cloud.googleapis.com/location: asia-northeast1 + annotations: + serving.knative.dev/creator: example@foo.iam.gserviceaccount.com + autoscaling.knative.dev/maxScale: '1' + run.googleapis.com/cpu-throttling: 'true' + ownerReferences: + - kind: Configuration + name: helloworld + uid: 0123-456-789-101112-13141516 + apiVersion: serving.knative.dev/v1 + controller: true + blockOwnerDeletion: true +spec: + containerConcurrency: 80 + timeoutSeconds: 300 + serviceAccountName: example@foo.iam.gserviceaccount.com + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +status: + observedGeneration: 1 + conditions: + - type: Ready + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: Active + status: 'True' + lastTransitionTime: '2022-01-28T07:47:04.722527Z' + severity: Info + - type: ContainerHealthy + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.929438Z' + - type: ResourcesAvailable + status: 'True' + lastTransitionTime: '2022-01-28T07:46:58.150114Z' + logUrl: https://console.cloud.google.com/logs + imageDigest: gcr.io/pipecd/helloworld@sha256:abcdefg +` + +func 
TestRevisionManifest(t *testing.T) { + t.Parallel() + + rm, err := ParseRevisionManifest([]byte(revisionManifest)) + require.NoError(t, err) + require.NotEmpty(t, rm) + + // YamlBytes + data, err := rm.YamlBytes() + require.NoError(t, err) + assert.NotEmpty(t, data) + + // RunRevision + got, err := rm.RunRevision() + require.NoError(t, err) + assert.NotEmpty(t, got) +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest.go b/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest.go new file mode 100644 index 0000000000..cb3af939da --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest.go @@ -0,0 +1,236 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "fmt" + "os" + "strings" + + "google.golang.org/api/run/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +type ServiceManifest struct { + Name string + u *unstructured.Unstructured +} + +func (m ServiceManifest) SetRevision(name string) error { + return unstructured.SetNestedField(m.u.Object, name, "spec", "template", "metadata", "name") +} + +type RevisionTraffic struct { + RevisionName string `json:"revisionName"` + Percent int `json:"percent"` +} + +func (m ServiceManifest) UpdateTraffic(revisions []RevisionTraffic) error { + items := []interface{}{} + for i := range revisions { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&revisions[i]) + if err != nil { + return fmt.Errorf("unable to set traffic for object: %w", err) + } + items = append(items, out) + } + + return unstructured.SetNestedSlice(m.u.Object, items, "spec", "traffic") +} + +func (m ServiceManifest) UpdateAllTraffic(revision string) error { + return m.UpdateTraffic([]RevisionTraffic{ + { + RevisionName: revision, + Percent: 100, + }, + }) +} + +func (m ServiceManifest) YamlBytes() ([]byte, error) { + return yaml.Marshal(m.u) +} + +func (m ServiceManifest) Labels() map[string]string { + return m.u.GetLabels() +} + +func (m ServiceManifest) RevisionLabels() map[string]string { + v, _, _ := unstructured.NestedStringMap(m.u.Object, "spec", "template", "metadata", "labels") + return v +} + +func (m ServiceManifest) AppID() (string, bool) { + v := m.Labels() + if v == nil || v[LabelApplication] == "" { + return "", false + } + return v[LabelApplication], true +} + +func (m ServiceManifest) AddLabels(labels map[string]string) { + if len(labels) == 0 { + return + } + + lbls := m.u.GetLabels() + if lbls == nil { + m.u.SetLabels(labels) + return + } + for k, v := range labels { + lbls[k] = v + } + m.u.SetLabels(lbls) +} + +func (m 
ServiceManifest) AddRevisionLabels(labels map[string]string) error { + if len(labels) == 0 { + return nil + } + + fields := []string{"spec", "template", "metadata", "labels"} + lbls, ok, err := unstructured.NestedStringMap(m.u.Object, fields...) + if err != nil { + return err + } + if !ok { + return unstructured.SetNestedStringMap(m.u.Object, labels, fields...) + } + + for k, v := range labels { + lbls[k] = v + } + return unstructured.SetNestedStringMap(m.u.Object, lbls, fields...) +} + +func (m ServiceManifest) RunService() (*run.Service, error) { + data, err := m.YamlBytes() + if err != nil { + return nil, err + } + + var s run.Service + if err := yaml.Unmarshal(data, &s); err != nil { + return nil, err + } + return &s, nil +} + +func loadServiceManifest(path string) (ServiceManifest, error) { + data, err := os.ReadFile(path) + if err != nil { + return ServiceManifest{}, err + } + return ParseServiceManifest(data) +} + +func ParseServiceManifest(data []byte) (ServiceManifest, error) { + var obj unstructured.Unstructured + if err := yaml.Unmarshal(data, &obj); err != nil { + return ServiceManifest{}, err + } + + return ServiceManifest{ + Name: obj.GetName(), + u: &obj, + }, nil +} + +func DecideRevisionName(sm ServiceManifest, commit string) (string, error) { + tag, err := FindImageTag(sm) + if err != nil { + return "", err + } + tag = strings.ReplaceAll(tag, ".", "") + + if len(commit) > 7 { + commit = commit[:7] + } + return fmt.Sprintf("%s-%s-%s", sm.Name, tag, commit), nil +} + +func FindImageTag(sm ServiceManifest) (string, error) { + containers, ok, err := unstructured.NestedSlice(sm.u.Object, "spec", "template", "spec", "containers") + if err != nil { + return "", err + } + if !ok || len(containers) == 0 { + return "", fmt.Errorf("spec.template.spec.containers was missing") + } + + container, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&containers[0]) + if err != nil { + return "", fmt.Errorf("invalid container format") + } + + image, ok, err 
:= unstructured.NestedString(container, "image") + if err != nil { + return "", err + } + if !ok || image == "" { + return "", fmt.Errorf("image was missing") + } + _, tag := parseContainerImage(image) + + return tag, nil +} + +func parseContainerImage(image string) (name, tag string) { + parts := strings.Split(image, ":") + if len(parts) == 2 { + tag = parts[1] + } + paths := strings.Split(parts[0], "/") + name = paths[len(paths)-1] + return +} + +func FindArtifactVersions(sm ServiceManifest) ([]*model.ArtifactVersion, error) { + containers, ok, err := unstructured.NestedSlice(sm.u.Object, "spec", "template", "spec", "containers") + if err != nil { + return nil, err + } + if !ok || len(containers) == 0 { + return nil, fmt.Errorf("spec.template.spec.containers was missing") + } + + container, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&containers[0]) + if err != nil { + return nil, fmt.Errorf("invalid container format") + } + + image, ok, err := unstructured.NestedString(container, "image") + if err != nil { + return nil, err + } + if !ok || image == "" { + return nil, fmt.Errorf("image was missing") + } + name, tag := parseContainerImage(image) + + return []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: tag, + Name: name, + Url: image, + }, + }, nil +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest_test.go new file mode 100644 index 0000000000..060d7143de --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/servicemanifest_test.go @@ -0,0 +1,427 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +const serviceManifest = ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + uid: service-uid + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +status: + observedGeneration: 5 + conditions: + - type: Ready + status: 'False' + reason: RevisionFailed + message: Revision helloworld-v010-1234567 is not ready. + lastTransitionTime: '2022-01-31T06:18:57.242172Z' + - type: ConfigurationsReady + status: 'False' + reason: ContainerMissing + message: Image 'gcr.io/pipecd/helloworld:v0.1.0' not found. + lastTransitionTime: '2022-01-31T06:18:57.177493Z' + - type: RoutesReady + status: 'False' + reason: RevisionFailed + message: Revision helloworld-v010-1234567 is not ready. 
+ lastTransitionTime: '2022-01-31T06:18:57.242172Z' + latestReadyRevisionName: helloworld-v010-1234567 + latestCreatedRevisionName: helloworld-v010-1234567 + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +` + +func TestServiceManifest(t *testing.T) { + t.Parallel() + + sm, err := ParseServiceManifest([]byte(serviceManifest)) + require.NoError(t, err) + require.NotEmpty(t, sm) + + // SetRevision + err = sm.SetRevision("helloworld-v010-1234567") + require.NoError(t, err) + + // UpdateTraffic + traffics := []RevisionTraffic{ + { + RevisionName: "helloworld-v010-1234567", + Percent: 50, + }, + { + RevisionName: "helloworld-v011-2345678", + Percent: 50, + }, + } + err = sm.UpdateTraffic(traffics) + require.NoError(t, err) + + // YamlBytes + data, err := sm.YamlBytes() + require.NoError(t, err) + assert.NotEmpty(t, data) + + // AddLabels + labels := map[string]string{ + LabelPiped: "hoge", + LabelApplication: "foo", + } + sm.AddLabels(labels) + + // Labels + assert.Len(t, sm.Labels(), 4) + + // AppID + id, ok := sm.AppID() + assert.True(t, ok) + assert.Equal(t, "foo", id) + + // RunService + got, err := sm.RunService() + require.NoError(t, err) + assert.NotEmpty(t, got) + + // AddRevisionLabels + err = sm.AddRevisionLabels(labels) + require.NoError(t, err) + + labels[LabelRevisionName] = "revision" + err = sm.AddRevisionLabels(labels) + require.NoError(t, err) + + // RevisionLabels + v := sm.RevisionLabels() + assert.Equal(t, labels, v) +} + +func TestParseServiceManifest(t *testing.T) { + t.Parallel() + + // Success + data := []byte(serviceManifest) + sm, err := ParseServiceManifest(data) + require.NoError(t, err) + require.Equal(t, "helloworld", sm.Name) + + // Failure + data = []byte("error") + _, err = ParseServiceManifest(data) + require.Error(t, err) +} + +func TestDecideRevisionName(t *testing.T) { + t.Parallel() + + data := []byte(serviceManifest) + sm, err := ParseServiceManifest(data) + require.NoError(t, err) + + name, err := 
DecideRevisionName(sm, "12345678912345678") + require.NoError(t, err) + require.Equal(t, "helloworld-v010-1234567", name) +} + +func TestFindImageTag(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifest string + want string + wantErr bool + }{ + { + name: "ok", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: "v0.1.0", + wantErr: false, + }, + { + name: "err: containers missing", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 +`, + want: "", + wantErr: true, + }, + { + name: "err: image missing", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + 
traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: "", + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + data := []byte(tc.manifest) + sm, err := ParseServiceManifest(data) + require.NoError(t, err) + + got, err := FindImageTag(sm) + if tc.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.want, got) + }) + } +} + +func TestFindArtifactVersions(t *testing.T) { + testcases := []struct { + name string + manifest string + want []*model.ArtifactVersion + wantErr bool + }{ + { + name: "ok", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v0.1.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v0.1.0", + }, + }, + wantErr: false, + }, + { + name: "err: containers missing", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 +`, + want: nil, + wantErr: true, + }, + { + name: "err: image missing", + manifest: ` +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + 
cloud.googleapis.com/location: asia-northeast1 + pipecd-dev-managed-by: piped + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v010-1234567 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v010-1234567 + percent: 100 +`, + want: nil, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + data := []byte(tc.manifest) + sm, err := ParseServiceManifest(data) + require.NoError(t, err) + + got, err := FindArtifactVersions(sm) + require.Equal(t, tc.wantErr, err != nil) + require.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/state.go b/pkg/app/pipedv1/platformprovider/cloudrun/state.go new file mode 100644 index 0000000000..92aaf90755 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/state.go @@ -0,0 +1,78 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudrun + +import ( + "sort" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func MakeResourceStates(svc *Service, revs []*Revision, updatedAt time.Time) []*model.CloudRunResourceState { + states := make([]*model.CloudRunResourceState, 0, len(revs)+1) + + // Set service state. + sm, err := svc.ServiceManifest() + if err == nil { + status, desc := svc.StatusConditions().HealthStatus() + states = append(states, makeResourceState(sm.u, status, desc, updatedAt)) + } + + // Set active revision states. + for _, r := range revs { + rm, err := r.RevisionManifest() + if err != nil { + continue + } + + status, desc := r.StatusConditions().HealthStatus() + states = append(states, makeResourceState(rm.u, status, desc, updatedAt)) + } + return states +} + +func makeResourceState(obj *unstructured.Unstructured, status model.CloudRunResourceState_HealthStatus, desc string, updatedAt time.Time) *model.CloudRunResourceState { + var ( + owners = obj.GetOwnerReferences() + ownerIDs = make([]string, 0, len(owners)) + creationTime = obj.GetCreationTimestamp() + ) + + for _, owner := range owners { + ownerIDs = append(ownerIDs, string(owner.UID)) + } + sort.Strings(ownerIDs) + + state := &model.CloudRunResourceState{ + Id: string(obj.GetUID()), + OwnerIds: ownerIDs, + ParentIds: ownerIDs, + Name: obj.GetName(), + ApiVersion: obj.GetAPIVersion(), + Kind: obj.GetKind(), + Namespace: obj.GetNamespace(), + + HealthStatus: status, + HealthDescription: desc, + + CreatedAt: creationTime.Unix(), + UpdatedAt: updatedAt.Unix(), + } + + return state +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/state_test.go b/pkg/app/pipedv1/platformprovider/cloudrun/state_test.go new file mode 100644 index 0000000000..c291e97b30 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/state_test.go @@ -0,0 +1,52 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudrun + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestMakeResourceStates(t *testing.T) { + t.Parallel() + + sm, err := ParseServiceManifest([]byte(serviceManifest)) + require.NoError(t, err) + + svc, err := sm.RunService() + require.NoError(t, err) + + s := (*Service)(svc) + + rm, err := ParseRevisionManifest([]byte(revisionManifest)) + require.NoError(t, err) + + rev, err := rm.RunRevision() + require.NoError(t, err) + + r := (*Revision)(rev) + + // MakeResourceStates + rs := []*Revision{r} + states := MakeResourceStates(s, rs, time.Now()) + require.Len(t, states, 2) + assert.Equal(t, model.CloudRunResourceState_OTHER, states[0].HealthStatus) + assert.Equal(t, model.CloudRunResourceState_HEALTHY, states[1].HealthStatus) +} diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/testdata/new_manifest.yaml b/pkg/app/pipedv1/platformprovider/cloudrun/testdata/new_manifest.yaml new file mode 100644 index 0000000000..f00b9c2e26 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/testdata/new_manifest.yaml @@ -0,0 +1,32 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + cloud.googleapis.com/location: asia-northeast1 + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + 
metadata: + name: helloworld-v050-0b13751 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v050-0b13751 + percent: 100 diff --git a/pkg/app/pipedv1/platformprovider/cloudrun/testdata/old_manifest.yaml b/pkg/app/pipedv1/platformprovider/cloudrun/testdata/old_manifest.yaml new file mode 100644 index 0000000000..b062780966 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/cloudrun/testdata/old_manifest.yaml @@ -0,0 +1,32 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + labels: + cloud.googleapis.com/location: asia-northeast1 + annotations: + run.googleapis.com/ingress: all + run.googleapis.com/ingress-status: all +spec: + template: + metadata: + name: helloworld-v050-0b13751 + annotations: + autoscaling.knative.dev/maxScale: '1' + spec: + containerConcurrency: 80 + timeoutSeconds: 300 + containers: + - image: gcr.io/pipecd/helloworld:v0.6.0 + args: + - server + ports: + - name: http1 + containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi + traffic: + - revisionName: helloworld-v050-0b13751 + percent: 100 diff --git a/pkg/app/pipedv1/platformprovider/ecs/client.go b/pkg/app/pipedv1/platformprovider/ecs/client.go new file mode 100644 index 0000000000..a13cff5015 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/client.go @@ -0,0 +1,508 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/ecs" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elbtypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider" + "github.com/pipe-cd/pipecd/pkg/backoff" + appconfig "github.com/pipe-cd/pipecd/pkg/config" +) + +const ( + // ServiceStable's constants. + retryServiceStable = 40 + retryServiceStableInterval = 15 * time.Second + + // TaskSetStable's constants. 
+ retryTaskSetStable = 40 + retryTaskSetStableInterval = 15 * time.Second +) + +type client struct { + ecsClient *ecs.Client + elbClient *elasticloadbalancingv2.Client + logger *zap.Logger +} + +func newClient(region, profile, credentialsFile, roleARN, tokenPath string, logger *zap.Logger) (Client, error) { + if region == "" { + return nil, fmt.Errorf("region is required field") + } + + c := &client{ + logger: logger.Named("ecs"), + } + + optFns := []func(*config.LoadOptions) error{config.WithRegion(region)} + if credentialsFile != "" { + optFns = append(optFns, config.WithSharedCredentialsFiles([]string{credentialsFile})) + } + if profile != "" { + optFns = append(optFns, config.WithSharedConfigProfile(profile)) + } + if tokenPath != "" && roleARN != "" { + optFns = append(optFns, config.WithWebIdentityRoleCredentialOptions(func(v *stscreds.WebIdentityRoleOptions) { + v.RoleARN = roleARN + v.TokenRetriever = stscreds.IdentityTokenFile(tokenPath) + })) + } + + cfg, err := config.LoadDefaultConfig(context.Background(), optFns...) 
+ if err != nil { + return nil, fmt.Errorf("failed to load config to create ecs client: %w", err) + } + c.ecsClient = ecs.NewFromConfig(cfg) + c.elbClient = elasticloadbalancingv2.NewFromConfig(cfg) + + return c, nil +} + +func (c *client) CreateService(ctx context.Context, service types.Service) (*types.Service, error) { + if service.DeploymentController == nil || service.DeploymentController.Type != types.DeploymentControllerTypeExternal { + return nil, fmt.Errorf("failed to create ECS service %s: deployment controller of type EXTERNAL is required", *service.ServiceName) + } + input := &ecs.CreateServiceInput{ + Cluster: service.ClusterArn, + ServiceName: service.ServiceName, + DesiredCount: aws.Int32(service.DesiredCount), + DeploymentController: service.DeploymentController, + DeploymentConfiguration: service.DeploymentConfiguration, + EnableECSManagedTags: service.EnableECSManagedTags, + EnableExecuteCommand: service.EnableExecuteCommand, + HealthCheckGracePeriodSeconds: service.HealthCheckGracePeriodSeconds, + PlacementConstraints: service.PlacementConstraints, + PlacementStrategy: service.PlacementStrategy, + PlatformVersion: service.PlatformVersion, + PropagateTags: types.PropagateTagsService, + Role: service.RoleArn, + SchedulingStrategy: service.SchedulingStrategy, + Tags: service.Tags, + } + output, err := c.ecsClient.CreateService(ctx, input) + if err != nil { + return nil, fmt.Errorf("failed to create ECS service %s: %w", *service.ServiceName, err) + } + + // Hack: Since we use EXTERNAL deployment controller, the below configurations are not allowed to be passed + // in CreateService step, but it required in further step (CreateTaskSet step). We reassign those values + // as part of service definition for that purpose. 
+ // ref: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + output.Service.LaunchType = service.LaunchType + output.Service.NetworkConfiguration = service.NetworkConfiguration + output.Service.ServiceRegistries = service.ServiceRegistries + + return output.Service, nil +} + +func (c *client) UpdateService(ctx context.Context, service types.Service) (*types.Service, error) { + input := &ecs.UpdateServiceInput{ + Cluster: service.ClusterArn, + Service: service.ServiceName, + DesiredCount: aws.Int32(service.DesiredCount), + EnableExecuteCommand: aws.Bool(service.EnableExecuteCommand), + PlacementStrategy: service.PlacementStrategy, + // TODO: Support update other properties of service. + // PlacementConstraints: service.PlacementConstraints, + } + output, err := c.ecsClient.UpdateService(ctx, input) + if err != nil { + return nil, fmt.Errorf("failed to update ECS service %s: %w", *service.ServiceName, err) + } + + // Hack: Since we use EXTERNAL deployment controller, the below configurations are not allowed to be passed + // in UpdateService step, but it required in further step (CreateTaskSet step). We reassign those values + // as part of service definition for that purpose. 
+ // ref: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + output.Service.LaunchType = service.LaunchType + output.Service.NetworkConfiguration = service.NetworkConfiguration + output.Service.ServiceRegistries = service.ServiceRegistries + + return output.Service, nil +} + +func (c *client) RegisterTaskDefinition(ctx context.Context, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error) { + input := &ecs.RegisterTaskDefinitionInput{ + Family: taskDefinition.Family, + ContainerDefinitions: taskDefinition.ContainerDefinitions, + RequiresCompatibilities: taskDefinition.RequiresCompatibilities, + ExecutionRoleArn: taskDefinition.ExecutionRoleArn, + TaskRoleArn: taskDefinition.TaskRoleArn, + NetworkMode: taskDefinition.NetworkMode, + Volumes: taskDefinition.Volumes, + RuntimePlatform: taskDefinition.RuntimePlatform, + // Requires defined at task level in case Fargate is used. + Cpu: taskDefinition.Cpu, + Memory: taskDefinition.Memory, + // TODO: Support tags for registering task definition. 
+ } + output, err := c.ecsClient.RegisterTaskDefinition(ctx, input) + if err != nil { + return nil, fmt.Errorf("failed to register ECS task definition of family %s: %w", *taskDefinition.Family, err) + } + return output.TaskDefinition, nil +} + +func (c *client) RunTask(ctx context.Context, taskDefinition types.TaskDefinition, clusterArn string, launchType string, awsVpcConfiguration *appconfig.ECSVpcConfiguration, tags []types.Tag) error { + if taskDefinition.TaskDefinitionArn == nil { + return fmt.Errorf("failed to run task of task family %s: no task definition provided", *taskDefinition.Family) + } + + input := &ecs.RunTaskInput{ + TaskDefinition: taskDefinition.Family, + Cluster: aws.String(clusterArn), + LaunchType: types.LaunchType(launchType), + Tags: tags, + } + + if len(awsVpcConfiguration.Subnets) > 0 { + input.NetworkConfiguration = &types.NetworkConfiguration{ + AwsvpcConfiguration: &types.AwsVpcConfiguration{ + Subnets: awsVpcConfiguration.Subnets, + AssignPublicIp: types.AssignPublicIp(awsVpcConfiguration.AssignPublicIP), + SecurityGroups: awsVpcConfiguration.SecurityGroups, + }, + } + } + + _, err := c.ecsClient.RunTask(ctx, input) + if err != nil { + return fmt.Errorf("failed to run ECS task %s: %w", *taskDefinition.TaskDefinitionArn, err) + } + return nil +} + +func (c *client) CreateTaskSet(ctx context.Context, service types.Service, taskDefinition types.TaskDefinition, targetGroup *types.LoadBalancer, scale int) (*types.TaskSet, error) { + if taskDefinition.TaskDefinitionArn == nil { + return nil, fmt.Errorf("failed to create task set of task family %s: no task definition provided", *taskDefinition.Family) + } + + input := &ecs.CreateTaskSetInput{ + Cluster: service.ClusterArn, + Service: service.ServiceArn, + TaskDefinition: taskDefinition.TaskDefinitionArn, + Scale: &types.Scale{Unit: types.ScaleUnitPercent, Value: float64(scale)}, + Tags: service.Tags, + // If you specify the awsvpc network mode, the task is allocated an elastic network 
interface, + // and you must specify a NetworkConfiguration when run a task with the task definition. + NetworkConfiguration: service.NetworkConfiguration, + LaunchType: service.LaunchType, + ServiceRegistries: service.ServiceRegistries, + } + if targetGroup != nil { + input.LoadBalancers = []types.LoadBalancer{*targetGroup} + } + output, err := c.ecsClient.CreateTaskSet(ctx, input) + if err != nil { + return nil, fmt.Errorf("failed to create ECS task set %s: %w", *taskDefinition.TaskDefinitionArn, err) + } + + // Wait created TaskSet to be stable. + waitInput := &ecs.DescribeTaskSetsInput{ + Cluster: service.ClusterArn, + Service: service.ServiceArn, + TaskSets: []string{*output.TaskSet.TaskSetArn}, + } + + retry := backoff.NewRetry(retryTaskSetStable, backoff.NewConstant(retryTaskSetStableInterval)) + _, err = retry.Do(ctx, func() (interface{}, error) { + output, err := c.ecsClient.DescribeTaskSets(ctx, waitInput) + if err != nil { + return nil, fmt.Errorf("failed to get ECS task set %s: %w", *taskDefinition.TaskDefinitionArn, err) + } + if len(output.TaskSets) == 0 { + return nil, fmt.Errorf("failed to get ECS task set %s: task sets empty", *taskDefinition.TaskDefinitionArn) + } + taskSet := output.TaskSets[0] + if taskSet.StabilityStatus == types.StabilityStatusSteadyState { + return nil, nil + } + return nil, fmt.Errorf("task set %s is not stable", *taskDefinition.TaskDefinitionArn) + }) + + if err != nil { + return nil, fmt.Errorf("failed to wait ECS task set %s stable: %w", *taskDefinition.TaskDefinitionArn, err) + } + + return output.TaskSet, nil +} + +func (c *client) GetServiceTaskSets(ctx context.Context, service types.Service) ([]*types.TaskSet, error) { + input := &ecs.DescribeServicesInput{ + Cluster: service.ClusterArn, + Services: []string{ + *service.ServiceArn, + }, + } + output, err := c.ecsClient.DescribeServices(ctx, input) + if err != nil { + return nil, fmt.Errorf("failed to get task sets of service %s: %w", *service.ServiceName, err) + } + 
if len(output.Services) == 0 {
+		return nil, fmt.Errorf("failed to get task sets of service %s: services empty", *service.ServiceName)
+	}
+	svc := output.Services[0]
+	activeTaskSetArns := make([]string, 0, len(svc.TaskSets))
+	for i := range svc.TaskSets {
+		if aws.ToString(svc.TaskSets[i].Status) == "DRAINING" {
+			continue
+		}
+		activeTaskSetArns = append(activeTaskSetArns, *svc.TaskSets[i].TaskSetArn)
+	}
+
+	// No primary or active task set found.
+	if len(activeTaskSetArns) == 0 {
+		return []*types.TaskSet{}, nil
+	}
+
+	tsInput := &ecs.DescribeTaskSetsInput{
+		Cluster:  service.ClusterArn,
+		Service:  service.ServiceArn,
+		TaskSets: activeTaskSetArns,
+		Include: []types.TaskSetField{
+			types.TaskSetFieldTags,
+		},
+	}
+	tsOutput, err := c.ecsClient.DescribeTaskSets(ctx, tsInput)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get task sets of service %s: %w", *service.ServiceName, err)
+	}
+	taskSets := make([]*types.TaskSet, 0, len(tsOutput.TaskSets))
+	for i := range tsOutput.TaskSets {
+		if !IsPipeCDManagedTaskSet(&tsOutput.TaskSets[i]) {
+			continue
+		}
+		taskSets = append(taskSets, &tsOutput.TaskSets[i])
+	}
+
+	return taskSets, nil
+}
+
+// WaitServiceStable blocks until the ECS service is stable.
+// It returns nil if the service is stable, otherwise it returns an error.
+// Note: This function follows the implementation of the AWS CLI.
+// AWS does not provide a public API for waiting until a service becomes stable, thus we use describe-service as a workaround instead.
+// ref: https://docs.aws.amazon.com/cli/latest/reference/ecs/wait/services-stable.html
+func (c *client) WaitServiceStable(ctx context.Context, service types.Service) error {
+	input := &ecs.DescribeServicesInput{
+		Cluster:  service.ClusterArn,
+		Services: []string{*service.ServiceArn},
+	}
+
+	retry := backoff.NewRetry(retryServiceStable, backoff.NewConstant(retryServiceStableInterval))
+	_, err := retry.Do(ctx, func() (interface{}, error) {
+		output, err := c.ecsClient.DescribeServices(ctx, input)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get service %s: %w", *service.ServiceName, err)
+		}
+
+		if len(output.Services) == 0 {
+			return nil, platformprovider.ErrNotFound
+		}
+
+		svc := output.Services[0]
+		if svc.PendingCount == 0 && svc.RunningCount >= svc.DesiredCount {
+			return nil, nil
+		}
+
+		return nil, fmt.Errorf("service %s is not stable", *service.ServiceName)
+	})
+
+	return err
+}
+
+func (c *client) DeleteTaskSet(ctx context.Context, taskSet types.TaskSet) error {
+	input := &ecs.DeleteTaskSetInput{
+		Cluster: taskSet.ClusterArn,
+		Service: taskSet.ServiceArn,
+		TaskSet: taskSet.TaskSetArn,
+	}
+	if _, err := c.ecsClient.DeleteTaskSet(ctx, input); err != nil {
+		return fmt.Errorf("failed to delete ECS task set %s: %w", *taskSet.TaskSetArn, err)
+	}
+
+	// Deactivate the deleted task set's task definition.
+	taskDefInput := &ecs.DeregisterTaskDefinitionInput{
+		TaskDefinition: taskSet.TaskDefinition,
+	}
+	if _, err := c.ecsClient.DeregisterTaskDefinition(ctx, taskDefInput); err != nil {
+		return fmt.Errorf("failed to inactive ECS task definition %s: %w", *taskSet.TaskDefinition, err)
+	}
+	return nil
+}
+
+func (c *client) UpdateServicePrimaryTaskSet(ctx context.Context, service types.Service, taskSet types.TaskSet) (*types.TaskSet, error) {
+	input := &ecs.UpdateServicePrimaryTaskSetInput{
+		Cluster:        service.ClusterArn,
+		Service:        service.ServiceArn,
+		PrimaryTaskSet: taskSet.TaskSetArn,
+	}
+	output, err := c.ecsClient.UpdateServicePrimaryTaskSet(ctx, input)
+	if err != nil {
+		return nil, fmt.Errorf("failed to update service primary ECS task set %s: %w", *taskSet.TaskSetArn, err)
+	}
+	return output.TaskSet, nil
+}
+
+func (c *client) ServiceExists(ctx context.Context, clusterName string, serviceName string) (bool, error) {
+	input := &ecs.DescribeServicesInput{
+		Cluster:  aws.String(clusterName),
+		Services: []string{serviceName},
+	}
+	output, err := c.ecsClient.DescribeServices(ctx, input)
+	if err != nil {
+		var nfe *types.ResourceNotFoundException
+		if errors.As(err, &nfe) {
+			// Only when a ResourceNotFound error occurred, the service name is available for creation, so do not raise an error.
+			return false, nil
+		}
+		return false, err
+	}
+	// Note: If the cluster's existing service with this serviceName is in INACTIVE status, it's safe to recreate the service with the same serviceName.
+ for _, service := range output.Services { + if *service.ServiceName == serviceName && *service.Status == "ACTIVE" { + return true, nil + } + } + return false, nil +} + +func (c *client) GetListenerArns(ctx context.Context, targetGroup types.LoadBalancer) ([]string, error) { + loadBalancerArn, err := c.getLoadBalancerArn(ctx, *targetGroup.TargetGroupArn) + if err != nil { + return nil, err + } + + input := &elasticloadbalancingv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(loadBalancerArn), + } + output, err := c.elbClient.DescribeListeners(ctx, input) + if err != nil { + return nil, err + } + if len(output.Listeners) == 0 { + return nil, platformprovider.ErrNotFound + } + + arns := make([]string, len(output.Listeners)) + for i := range output.Listeners { + arns[i] = *output.Listeners[i].ListenerArn + } + + return arns, nil +} + +func (c *client) getLoadBalancerArn(ctx context.Context, targetGroupArn string) (string, error) { + input := &elasticloadbalancingv2.DescribeTargetGroupsInput{ + TargetGroupArns: []string{targetGroupArn}, + } + output, err := c.elbClient.DescribeTargetGroups(ctx, input) + if err != nil { + return "", err + } + if len(output.TargetGroups) == 0 { + return "", platformprovider.ErrNotFound + } + // Note: Currently, only support TargetGroup which serves traffic from one Load Balancer. 
+ return output.TargetGroups[0].LoadBalancerArns[0], nil +} + +func (c *client) ModifyListeners(ctx context.Context, listenerArns []string, routingTrafficCfg RoutingTrafficConfig) error { + if len(routingTrafficCfg) != 2 { + return fmt.Errorf("invalid listener configuration: requires 2 target groups") + } + + for _, listenerArn := range listenerArns { + describeRulesOutput, err := c.elbClient.DescribeRules(ctx, &elasticloadbalancingv2.DescribeRulesInput{ + ListenerArn: aws.String(listenerArn), + }) + if err != nil { + return fmt.Errorf("failed to describe rules of listener %s: %w", listenerArn, err) + } + + for _, rule := range describeRulesOutput.Rules { + modifiedActions := make([]elbtypes.Action, 0, len(rule.Actions)) + for _, action := range rule.Actions { + if action.Type == elbtypes.ActionTypeEnumForward && routingTrafficCfg.hasSameTargets(action.ForwardConfig.TargetGroups) { + // Modify only the forward action which has the same target groups. + modifiedAction := elbtypes.Action{ + Type: elbtypes.ActionTypeEnumForward, + Order: action.Order, + ForwardConfig: &elbtypes.ForwardActionConfig{ + TargetGroups: []elbtypes.TargetGroupTuple{ + { + TargetGroupArn: aws.String(routingTrafficCfg[0].TargetGroupArn), + Weight: aws.Int32(int32(routingTrafficCfg[0].Weight)), + }, + { + TargetGroupArn: aws.String(routingTrafficCfg[1].TargetGroupArn), + Weight: aws.Int32(int32(routingTrafficCfg[1].Weight)), + }, + }, + }, + } + modifiedActions = append(modifiedActions, modifiedAction) + } else { + modifiedActions = append(modifiedActions, action) + } + } + + // The default rule needs to be modified by ModifyListener API. 
+ if rule.IsDefault { + _, err := c.elbClient.ModifyListener(ctx, &elasticloadbalancingv2.ModifyListenerInput{ + ListenerArn: &listenerArn, + DefaultActions: modifiedActions, + }) + if err != nil { + return fmt.Errorf("failed to modify default rule %s: %w", *rule.RuleArn, err) + } + } else { + _, err := c.elbClient.ModifyRule(ctx, &elasticloadbalancingv2.ModifyRuleInput{ + RuleArn: rule.RuleArn, + Actions: modifiedActions, + }) + if err != nil { + return fmt.Errorf("failed to modify rule %s: %w", *rule.RuleArn, err) + } + } + } + } + return nil +} + +func (c *client) TagResource(ctx context.Context, resourceArn string, tags []types.Tag) error { + input := &ecs.TagResourceInput{ + ResourceArn: aws.String(resourceArn), + Tags: tags, + } + _, err := c.ecsClient.TagResource(ctx, input) + if err != nil { + return fmt.Errorf("failed to update tag of resource %s: %w", resourceArn, err) + } + return nil +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/ecs.go b/pkg/app/pipedv1/platformprovider/ecs/ecs.go new file mode 100644 index 0000000000..6814b8f6ac --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/ecs.go @@ -0,0 +1,132 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "context" + "path/filepath" + "sync" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +const ( + LabelManagedBy string = "pipecd-dev-managed-by" // Always be piped. + LabelPiped string = "pipecd-dev-piped" // The id of piped handling this application. + LabelApplication string = "pipecd-dev-application" // The application this resource belongs to. + LabelCommitHash string = "pipecd-dev-commit-hash" // Hash value of the deployed commit. + ManagedByPiped string = "piped" +) + +// Client is wrapper of ECS client. +type Client interface { + ECS + ELB +} + +type ECS interface { + ServiceExists(ctx context.Context, clusterName string, servicesName string) (bool, error) + CreateService(ctx context.Context, service types.Service) (*types.Service, error) + UpdateService(ctx context.Context, service types.Service) (*types.Service, error) + WaitServiceStable(ctx context.Context, service types.Service) error + RegisterTaskDefinition(ctx context.Context, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error) + RunTask(ctx context.Context, taskDefinition types.TaskDefinition, clusterArn string, launchType string, awsVpcConfiguration *config.ECSVpcConfiguration, tags []types.Tag) error + GetServiceTaskSets(ctx context.Context, service types.Service) ([]*types.TaskSet, error) + CreateTaskSet(ctx context.Context, service types.Service, taskDefinition types.TaskDefinition, targetGroup *types.LoadBalancer, scale int) (*types.TaskSet, error) + DeleteTaskSet(ctx context.Context, taskSet types.TaskSet) error + UpdateServicePrimaryTaskSet(ctx context.Context, service types.Service, taskSet types.TaskSet) (*types.TaskSet, error) + TagResource(ctx context.Context, resourceArn string, tags []types.Tag) error +} + +type ELB interface { + GetListenerArns(ctx context.Context, targetGroup types.LoadBalancer) 
([]string, error) + // ModifyListeners modifies the actions of type ActionTypeEnumForward to perform routing traffic + // to the given target groups. Other actions won't be modified. + ModifyListeners(ctx context.Context, listenerArns []string, routingTrafficCfg RoutingTrafficConfig) error +} + +// Registry holds a pool of aws client wrappers. +type Registry interface { + Client(name string, cfg *config.PlatformProviderECSConfig, logger *zap.Logger) (Client, error) +} + +// LoadServiceDefinition returns ServiceDefinition object from a given service definition file. +func LoadServiceDefinition(appDir, serviceDefinitionFilename string) (types.Service, error) { + path := filepath.Join(appDir, serviceDefinitionFilename) + return loadServiceDefinition(path) +} + +// LoadTaskDefinition returns TaskDefinition object from a given task definition file. +func LoadTaskDefinition(appDir, taskDefinition string) (types.TaskDefinition, error) { + path := filepath.Join(appDir, taskDefinition) + return loadTaskDefinition(path) +} + +// LoadTargetGroups returns primary & canary target groups according to the defined in pipe definition file. 
+func LoadTargetGroups(targetGroups config.ECSTargetGroups) (*types.LoadBalancer, *types.LoadBalancer, error) { + return loadTargetGroups(targetGroups) +} + +type registry struct { + clients map[string]Client + mu sync.RWMutex + newGroup *singleflight.Group +} + +func (r *registry) Client(name string, cfg *config.PlatformProviderECSConfig, logger *zap.Logger) (Client, error) { + r.mu.RLock() + client, ok := r.clients[name] + r.mu.RUnlock() + if ok { + return client, nil + } + + c, err, _ := r.newGroup.Do(name, func() (interface{}, error) { + return newClient(cfg.Region, cfg.Profile, cfg.CredentialsFile, cfg.RoleARN, cfg.TokenFile, logger) + }) + if err != nil { + return nil, err + } + + client = c.(Client) + r.mu.Lock() + r.clients[name] = client + r.mu.Unlock() + + return client, nil +} + +var defaultRegistry = ®istry{ + clients: make(map[string]Client), + newGroup: &singleflight.Group{}, +} + +// DefaultRegistry returns a pool of aws clients and a mutex associated with it. +func DefaultRegistry() Registry { + return defaultRegistry +} + +func MakeTags(tags map[string]string) []types.Tag { + resourceTags := make([]types.Tag, 0, len(tags)) + for key, value := range tags { + resourceTags = append(resourceTags, types.Tag{Key: aws.String(key), Value: aws.String(value)}) + } + return resourceTags +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/routing_traffic.go b/pkg/app/pipedv1/platformprovider/ecs/routing_traffic.go new file mode 100644 index 0000000000..146b823a4c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/routing_traffic.go @@ -0,0 +1,45 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" +) + +type RoutingTrafficConfig []targetGroupWeight + +type targetGroupWeight struct { + TargetGroupArn string + Weight int +} + +func (c RoutingTrafficConfig) hasSameTargets(forwardActionTargets []types.TargetGroupTuple) bool { + if len(c) != len(forwardActionTargets) { + return false + } + + cMap := make(map[string]struct{}) + for _, item := range c { + cMap[item.TargetGroupArn] = struct{}{} + } + + for _, target := range forwardActionTargets { + if _, ok := cMap[*target.TargetGroupArn]; !ok { + return false + } + } + + return true +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/routing_traffic_test.go b/pkg/app/pipedv1/platformprovider/ecs/routing_traffic_test.go new file mode 100644 index 0000000000..eb6ea9cc5d --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/routing_traffic_test.go @@ -0,0 +1,135 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/stretchr/testify/assert" +) + +func TestHasSameTargets(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + cfg RoutingTrafficConfig + actionTargets []types.TargetGroupTuple + expected bool + }{ + { + name: "has the same target groups in the same order", + cfg: RoutingTrafficConfig{ + { + TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1", + Weight: 100, + }, + { + TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2", + Weight: 0, + }, + }, + actionTargets: []types.TargetGroupTuple{ + { + TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1"), + Weight: aws.Int32(100), + }, + { + TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2"), + Weight: aws.Int32(0), + }, + }, + expected: true, + }, + { + name: "has the same target groups in the different order", + cfg: RoutingTrafficConfig{ + { + TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1", + Weight: 100, + }, + { + TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2", + Weight: 0, + }, + }, + actionTargets: []types.TargetGroupTuple{ + { + TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2"), + Weight: aws.Int32(0), + }, + { + TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1"), + Weight: aws.Int32(100), + }, + }, + expected: true, + }, + { + name: "the number of target groups are different", + cfg: RoutingTrafficConfig{ + { + TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1", + Weight: 100, + }, + }, + actionTargets: []types.TargetGroupTuple{ + { + TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1"), + Weight: aws.Int32(0), + }, + { + TargetGroupArn: 
aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2"),
+					Weight:         aws.Int32(100),
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "has a different target group",
+			cfg: RoutingTrafficConfig{
+				{
+					TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1",
+					Weight:         100,
+				},
+				{
+					TargetGroupArn: "arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy2",
+					Weight:         0,
+				},
+			},
+			actionTargets: []types.TargetGroupTuple{
+				{
+					TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy1"),
+					Weight:         aws.Int32(0),
+				},
+				{
+					TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:::targetgroup/xxx/yyy3"),
+					Weight:         aws.Int32(100),
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tc := range testcases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			hasSame := tc.cfg.hasSameTargets(tc.actionTargets)
+			assert.Equal(t, tc.expected, hasSame)
+		})
+	}
+}
diff --git a/pkg/app/pipedv1/platformprovider/ecs/service_test.go b/pkg/app/pipedv1/platformprovider/ecs/service_test.go
new file mode 100644
index 0000000000..90db429014
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/ecs/service_test.go
@@ -0,0 +1,167 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package ecs + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/stretchr/testify/assert" +) + +func TestParseServiceDefinition(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input string + expected types.Service + expectedErr bool + }{ + { + name: "yaml format input", + input: ` +cluster: arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY +serviceName: nginx-external-canary +desiredCount: 2 +role: arn:aws:iam::XXXXX:role/ecsTaskExecutionRole +deploymentConfiguration: + maximumPercent: 200 + minimumHealthyPercent: 0 +schedulingStrategy: REPLICA +deploymentController: + type: EXTERNAL +`, + expected: types.Service{ + ClusterArn: aws.String("arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY"), + ServiceName: aws.String("nginx-external-canary"), + DesiredCount: 2, + RoleArn: aws.String("arn:aws:iam::XXXXX:role/ecsTaskExecutionRole"), + DeploymentConfiguration: &types.DeploymentConfiguration{ + MaximumPercent: aws.Int32(200), + MinimumHealthyPercent: aws.Int32(0), + }, + SchedulingStrategy: types.SchedulingStrategyReplica, + DeploymentController: &types.DeploymentController{ + Type: types.DeploymentControllerTypeExternal, + }, + }, + }, + { + name: "yaml format input with roleArn field name", + input: ` +cluster: arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY +serviceName: nginx-external-canary +desiredCount: 2 +roleArn: arn:aws:iam::XXXXX:role/ecsTaskExecutionRole +deploymentConfiguration: + maximumPercent: 200 + minimumHealthyPercent: 0 +schedulingStrategy: REPLICA +deploymentController: + type: EXTERNAL +`, + expected: types.Service{ + ClusterArn: aws.String("arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY"), + ServiceName: aws.String("nginx-external-canary"), + DesiredCount: 2, + RoleArn: aws.String("arn:aws:iam::XXXXX:role/ecsTaskExecutionRole"), + DeploymentConfiguration: &types.DeploymentConfiguration{ + MaximumPercent: aws.Int32(200), + MinimumHealthyPercent: aws.Int32(0), 
+ }, + SchedulingStrategy: types.SchedulingStrategyReplica, + DeploymentController: &types.DeploymentController{ + Type: types.DeploymentControllerTypeExternal, + }, + }, + }, + { + name: "json format input", + input: ` +{ + "cluster": "arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY", + "serviceName": "nginx-external-canary", + "desiredCount": 2, + "role": "arn:aws:iam::XXXXX:role/ecsTaskExecutionRole", + "deploymentConfiguration": { + "maximumPercent": 200, + "minimumHealthyPercent": 0 + }, + "schedulingStrategy": "REPLICA", + "deploymentController": { + "type": "EXTERNAL" + } +} +`, + expected: types.Service{ + ClusterArn: aws.String("arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY"), + ServiceName: aws.String("nginx-external-canary"), + DesiredCount: 2, + RoleArn: aws.String("arn:aws:iam::XXXXX:role/ecsTaskExecutionRole"), + DeploymentConfiguration: &types.DeploymentConfiguration{ + MaximumPercent: aws.Int32(200), + MinimumHealthyPercent: aws.Int32(0), + }, + SchedulingStrategy: types.SchedulingStrategyReplica, + DeploymentController: &types.DeploymentController{ + Type: types.DeploymentControllerTypeExternal, + }, + }, + }, + { + name: "json format input with clusterArn field name", + input: ` +{ + "clusterArn": "arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY", + "serviceName": "nginx-external-canary", + "desiredCount": 2, + "role": "arn:aws:iam::XXXXX:role/ecsTaskExecutionRole", + "deploymentConfiguration": { + "maximumPercent": 200, + "minimumHealthyPercent": 0 + }, + "schedulingStrategy": "REPLICA", + "deploymentController": { + "type": "EXTERNAL" + } +} +`, + expected: types.Service{ + ClusterArn: aws.String("arn:aws:ecs:ap-northeast-1:XXXX:cluster/YYYY"), + ServiceName: aws.String("nginx-external-canary"), + DesiredCount: 2, + RoleArn: aws.String("arn:aws:iam::XXXXX:role/ecsTaskExecutionRole"), + DeploymentConfiguration: &types.DeploymentConfiguration{ + MaximumPercent: aws.Int32(200), + MinimumHealthyPercent: aws.Int32(0), + }, + SchedulingStrategy: 
types.SchedulingStrategyReplica, + DeploymentController: &types.DeploymentController{ + Type: types.DeploymentControllerTypeExternal, + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseServiceDefinition([]byte(tc.input)) + assert.Equal(t, tc.expectedErr, err != nil) + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/service.go b/pkg/app/pipedv1/platformprovider/ecs/service.go new file mode 100644 index 0000000000..c65b03c0fd --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/service.go @@ -0,0 +1,78 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "os" + + "sigs.k8s.io/yaml" + + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +func loadServiceDefinition(path string) (types.Service, error) { + data, err := os.ReadFile(path) + if err != nil { + return types.Service{}, err + } + return parseServiceDefinition(data) +} + +func parseServiceDefinition(data []byte) (types.Service, error) { + var obj types.Service + if err := yaml.Unmarshal(data, &obj); err != nil { + return types.Service{}, err + } + + if obj.ClusterArn == nil { + // Rename cluster field to clusterArn if exist + clusterArn, err := parseServiceDefinitionForCluster(data) + if err != nil { + return types.Service{}, err + } + obj.ClusterArn = &clusterArn + } + + if obj.RoleArn == nil { + // Rename role field to roleArn if exist + roleArn, err := parseServiceDefinitionForRole(data) + if err != nil { + return types.Service{}, err + } + obj.RoleArn = &roleArn + } + + return obj, nil +} + +func parseServiceDefinitionForCluster(data []byte) (string, error) { + var obj struct { + Cluster string `json:"cluster"` + } + if err := yaml.Unmarshal(data, &obj); err != nil { + return "", err + } + return obj.Cluster, nil +} + +func parseServiceDefinitionForRole(data []byte) (string, error) { + var obj struct { + Role string `json:"role"` + } + if err := yaml.Unmarshal(data, &obj); err != nil { + return "", err + } + return obj.Role, nil +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/target_groups.go b/pkg/app/pipedv1/platformprovider/ecs/target_groups.go new file mode 100644 index 0000000000..af170ae59f --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/target_groups.go @@ -0,0 +1,51 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "errors" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +var ErrNoTargetGroup = errors.New("no target group") + +func loadTargetGroups(targetGroups config.ECSTargetGroups) (*types.LoadBalancer, *types.LoadBalancer, error) { + if targetGroups.Primary == nil { + return nil, nil, ErrNoTargetGroup + } + + primary := &types.LoadBalancer{ + TargetGroupArn: aws.String(targetGroups.Primary.TargetGroupArn), + ContainerName: aws.String(targetGroups.Primary.ContainerName), + ContainerPort: aws.Int32(int32(targetGroups.Primary.ContainerPort)), + LoadBalancerName: aws.String(targetGroups.Primary.LoadBalancerName), + } + + var canary *types.LoadBalancer + if targetGroups.Canary != nil { + canary = &types.LoadBalancer{ + TargetGroupArn: aws.String(targetGroups.Canary.TargetGroupArn), + ContainerName: aws.String(targetGroups.Canary.ContainerName), + ContainerPort: aws.Int32(int32(targetGroups.Canary.ContainerPort)), + LoadBalancerName: aws.String(targetGroups.Canary.LoadBalancerName), + } + } + + return primary, canary, nil +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/target_groups_test.go b/pkg/app/pipedv1/platformprovider/ecs/target_groups_test.go new file mode 100644 index 0000000000..6e1c88d2af --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/target_groups_test.go @@ -0,0 +1,102 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/stretchr/testify/assert" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +func TestLoadTargetGroup(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + cfg config.ECSTargetGroups + expected []*types.LoadBalancer + expectedErr bool + }{ + { + name: "no target group", + cfg: config.ECSTargetGroups{}, + expected: []*types.LoadBalancer{nil, nil}, + expectedErr: true, + }, + { + name: "primary target group only", + cfg: config.ECSTargetGroups{ + Primary: &config.ECSTargetGroup{ + TargetGroupArn: "primary-target-group-arn", + ContainerName: "primary-container-name", + ContainerPort: 80, + }, + }, + expected: []*types.LoadBalancer{ + { + TargetGroupArn: aws.String("primary-target-group-arn"), + ContainerName: aws.String("primary-container-name"), + ContainerPort: aws.Int32(80), + LoadBalancerName: aws.String(""), + }, + nil, + }, + expectedErr: false, + }, + { + name: "primary and canary target group", + cfg: config.ECSTargetGroups{ + Primary: &config.ECSTargetGroup{ + TargetGroupArn: "primary-target-group-arn", + ContainerName: "primary-container-name", + ContainerPort: 80, + }, + Canary: &config.ECSTargetGroup{ + TargetGroupArn: "canary-target-group-arn", + ContainerName: "canary-container-name", + ContainerPort: 80, + }, + }, + expected: 
[]*types.LoadBalancer{ + { + TargetGroupArn: aws.String("primary-target-group-arn"), + ContainerName: aws.String("primary-container-name"), + ContainerPort: aws.Int32(80), + LoadBalancerName: aws.String(""), + }, + { + TargetGroupArn: aws.String("canary-target-group-arn"), + ContainerName: aws.String("canary-container-name"), + ContainerPort: aws.Int32(80), + LoadBalancerName: aws.String(""), + }, + }, + expectedErr: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + primary, canary, err := loadTargetGroups(tc.cfg) + assert.Equal(t, tc.expectedErr, err != nil) + assert.Equal(t, tc.expected[0], primary) + assert.Equal(t, tc.expected[1], canary) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/task.go b/pkg/app/pipedv1/platformprovider/ecs/task.go new file mode 100644 index 0000000000..c75b38fe2c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/task.go @@ -0,0 +1,94 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ecs + +import ( + "fmt" + "os" + "strings" + + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "sigs.k8s.io/yaml" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func loadTaskDefinition(path string) (types.TaskDefinition, error) { + data, err := os.ReadFile(path) + if err != nil { + return types.TaskDefinition{}, err + } + return parseTaskDefinition(data) +} + +func parseTaskDefinition(data []byte) (types.TaskDefinition, error) { + var obj types.TaskDefinition + if err := yaml.Unmarshal(data, &obj); err != nil { + return types.TaskDefinition{}, err + } + return obj, nil +} + +// FindImageTag parses image tag from given ECS task definition. +func FindImageTag(taskDefinition types.TaskDefinition) (string, error) { + if len(taskDefinition.ContainerDefinitions) == 0 { + return "", fmt.Errorf("container definition could not be empty") + } + name, tag := parseContainerImage(*taskDefinition.ContainerDefinitions[0].Image) + if name == "" { + return "", fmt.Errorf("image name could not be empty") + } + return tag, nil +} + +func parseContainerImage(image string) (name, tag string) { + parts := strings.Split(image, ":") + if len(parts) == 2 { + tag = parts[1] + } + paths := strings.Split(parts[0], "/") + name = paths[len(paths)-1] + return +} + +// FindArtifactVersions parses artifact versions from ECS task definition. +func FindArtifactVersions(taskDefinition types.TaskDefinition) ([]*model.ArtifactVersion, error) { + if len(taskDefinition.ContainerDefinitions) == 0 { + return nil, fmt.Errorf("container definition could not be empty") + } + + // Remove duplicate images. 
+ imageMap := map[string]struct{}{} + for _, cd := range taskDefinition.ContainerDefinitions { + imageMap[*cd.Image] = struct{}{} + } + + versions := make([]*model.ArtifactVersion, 0, len(imageMap)) + for i := range imageMap { + name, tag := parseContainerImage(i) + if name == "" { + return nil, fmt.Errorf("image name could not be empty") + } + + versions = append(versions, &model.ArtifactVersion{ + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: tag, + Name: name, + Url: i, + }) + } + + return versions, nil +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/task_set.go b/pkg/app/pipedv1/platformprovider/ecs/task_set.go new file mode 100644 index 0000000000..d55fbbba03 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/task_set.go @@ -0,0 +1,26 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import "github.com/aws/aws-sdk-go-v2/service/ecs/types" + +func IsPipeCDManagedTaskSet(ts *types.TaskSet) bool { + for _, tag := range ts.Tags { + if *tag.Key == LabelManagedBy && *tag.Value == ManagedByPiped { + return true + } + } + return false +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/task_set_test.go b/pkg/app/pipedv1/platformprovider/ecs/task_set_test.go new file mode 100644 index 0000000000..a9ba2fe30c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/task_set_test.go @@ -0,0 +1,61 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/stretchr/testify/assert" +) + +func TestIsPipeCDManagedTaskSet(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + ts *types.TaskSet + expected bool + }{ + { + name: "managed by piped", + ts: &types.TaskSet{Tags: []types.Tag{ + {Key: aws.String(LabelManagedBy), Value: aws.String(ManagedByPiped)}, + }}, + expected: true, + }, + { + name: "nil tags", + ts: &types.TaskSet{}, + expected: false, + }, + { + name: "not managed by piped", + ts: &types.TaskSet{Tags: []types.Tag{ + {Key: aws.String(LabelManagedBy), Value: aws.String("other")}, + {Key: aws.String("hoge"), Value: aws.String("fuga")}, + }}, + expected: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got := IsPipeCDManagedTaskSet(tc.ts) + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/ecs/task_test.go b/pkg/app/pipedv1/platformprovider/ecs/task_test.go new file mode 100644 index 0000000000..68ae7bef4b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/ecs/task_test.go @@ -0,0 +1,288 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ecs + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/stretchr/testify/assert" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestParseTaskDefinition(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input string + expected types.TaskDefinition + expectedErr bool + }{ + { + name: "yaml format input", + input: ` +family: nginx-canary-fam-1 +compatibilities: + - FARGATE +networkMode: awsvpc +memory: 512 +cpu: 256 +`, + expected: types.TaskDefinition{ + Family: aws.String("nginx-canary-fam-1"), + Compatibilities: []types.Compatibility{types.CompatibilityFargate}, + NetworkMode: types.NetworkModeAwsvpc, + Memory: aws.String("512"), + Cpu: aws.String("256"), + }, + }, + { + name: "json format input", + input: ` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256 +} +`, + expected: types.TaskDefinition{ + Family: aws.String("nginx-canary-fam-1"), + Compatibilities: []types.Compatibility{types.CompatibilityFargate}, + NetworkMode: types.NetworkModeAwsvpc, + Memory: aws.String("512"), + Cpu: aws.String("256"), + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseTaskDefinition([]byte(tc.input)) + assert.Equal(t, tc.expectedErr, err != nil) + assert.Equal(t, tc.expected, got) + }) + } +} + +func TestFindArtifactVersions(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input []byte 
+ expected []*model.ArtifactVersion + expectedErr bool + }{ + { + name: "ok", + input: []byte(` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256, + "containerDefinitions" : [ + { + "image": "gcr.io/pipecd/helloworld:v1.0.0", + "name": "helloworld", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9085, + "protocol": "tcp" + } + ] + } + ] +} +`), + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + }, + expectedErr: false, + }, + { + name: "missing containerDefinitions", + input: []byte(` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256, +} +`), + expected: nil, + expectedErr: true, + }, + { + name: "missing image name", + input: []byte(` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256, + "containerDefinitions" : [ + { + "image": "gcr.io/pipecd/:v1.0.0", + "name": "helloworld", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9085, + "protocol": "tcp" + } + ] + } + ] +} +`), + expected: nil, + expectedErr: true, + }, + { + name: "multiple containers", + input: []byte(` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256, + "containerDefinitions" : [ + { + "image": "gcr.io/pipecd/helloworld:v1.0.0", + "name": "helloworld", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9085, + "protocol": "tcp" + } + ] + }, + { + "image": "gcr.io/pipecd/my-service:v1.0.0", + "name": "my-service", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9090, + "protocol": "tcp" + } + ] + } + ] +} +`), + expected: []*model.ArtifactVersion{ + { + Kind: 
model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "my-service", + Url: "gcr.io/pipecd/my-service:v1.0.0", + }, + }, + expectedErr: false, + }, + { + name: "multiple containers with the same image", + input: []byte(` +{ + "family": "nginx-canary-fam-1", + "compatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": 512, + "cpu": 256, + "containerDefinitions" : [ + { + "image": "gcr.io/pipecd/helloworld:v1.0.0", + "name": "helloworld", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9085, + "protocol": "tcp" + } + ] + }, + { + "image": "gcr.io/pipecd/helloworld:v1.0.0", + "name": "helloworld-02", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 9091, + "protocol": "tcp" + } + ] + } + ] +} +`), + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_CONTAINER_IMAGE, + Version: "v1.0.0", + Name: "helloworld", + Url: "gcr.io/pipecd/helloworld:v1.0.0", + }, + }, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + td, _ := parseTaskDefinition(tc.input) + versions, err := FindArtifactVersions(td) + assert.Equal(t, tc.expectedErr, err != nil) + assert.ElementsMatch(t, tc.expected, versions) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/applier.go b/pkg/app/pipedv1/platformprovider/kubernetes/applier.go new file mode 100644 index 0000000000..c33dd3130c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/applier.go @@ -0,0 +1,247 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" + "github.com/pipe-cd/pipecd/pkg/config" +) + +type Applier interface { + // ApplyManifest does applying the given manifest. + ApplyManifest(ctx context.Context, manifest Manifest) error + // CreateManifest does creating resource from given manifest. + CreateManifest(ctx context.Context, manifest Manifest) error + // ReplaceManifest does replacing resource from given manifest. + ReplaceManifest(ctx context.Context, manifest Manifest) error + // Delete deletes the given resource from Kubernetes cluster. + Delete(ctx context.Context, key ResourceKey) error +} + +type applier struct { + input config.KubernetesDeploymentInput + platformProvider config.PlatformProviderKubernetesConfig + logger *zap.Logger + + kubectl *Kubectl + initOnce sync.Once + initErr error +} + +func NewApplier(input config.KubernetesDeploymentInput, cp config.PlatformProviderKubernetesConfig, logger *zap.Logger) Applier { + return &applier{ + input: input, + platformProvider: cp, + logger: logger.Named("kubernetes-applier"), + } +} + +// ApplyManifest does applying the given manifest. 
+func (a *applier) ApplyManifest(ctx context.Context, manifest Manifest) error { + a.initOnce.Do(func() { + a.kubectl, a.initErr = a.findKubectl(ctx, a.getToolVersionToRun()) + }) + if a.initErr != nil { + return a.initErr + } + + if a.input.AutoCreateNamespace { + err := a.kubectl.CreateNamespace( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + ) + if err != nil && !errors.Is(err, errResourceAlreadyExists) { + return err + } + } + + return a.kubectl.Apply( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) +} + +// CreateManifest uses kubectl to create the given manifests. +func (a *applier) CreateManifest(ctx context.Context, manifest Manifest) error { + a.initOnce.Do(func() { + a.kubectl, a.initErr = a.findKubectl(ctx, a.getToolVersionToRun()) + }) + if a.initErr != nil { + return a.initErr + } + + if a.input.AutoCreateNamespace { + err := a.kubectl.CreateNamespace( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + ) + if err != nil && !errors.Is(err, errResourceAlreadyExists) { + return err + } + } + + return a.kubectl.Create( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) +} + +// ReplaceManifest uses kubectl to replace the given manifests. +func (a *applier) ReplaceManifest(ctx context.Context, manifest Manifest) error { + a.initOnce.Do(func() { + a.kubectl, a.initErr = a.findKubectl(ctx, a.getToolVersionToRun()) + }) + if a.initErr != nil { + return a.initErr + } + + err := a.kubectl.Replace( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) + if err == nil { + return nil + } + + if errors.Is(err, errorReplaceNotFound) { + return ErrNotFound + } + + return err +} + +// Delete deletes the given resource from Kubernetes cluster. +// If the resource key is different, this returns ErrNotFound. 
+func (a *applier) Delete(ctx context.Context, k ResourceKey) (err error) { + a.initOnce.Do(func() { + a.kubectl, a.initErr = a.findKubectl(ctx, a.getToolVersionToRun()) + }) + if a.initErr != nil { + return a.initErr + } + + m, err := a.kubectl.Get( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(k), + k, + ) + + if err != nil { + return err + } + + if k.String() != m.GetAnnotations()[LabelResourceKey] { + return ErrNotFound + } + + return a.kubectl.Delete( + ctx, + a.platformProvider.KubeConfigPath, + a.getNamespaceToRun(k), + k, + ) +} + +// getNamespaceToRun returns namespace used on kubectl apply/delete commands. +// priority: config.KubernetesDeploymentInput > kubernetes.ResourceKey +func (a *applier) getNamespaceToRun(k ResourceKey) string { + if a.input.Namespace != "" { + return a.input.Namespace + } + return k.Namespace +} + +// getToolVersionToRun returns version of kubectl which should be used for commands. +// priority: applicationConfig.KubectlVersion > pipedConfig.KubectlVersion +func (a *applier) getToolVersionToRun() string { + if a.input.KubectlVersion != "" { + return a.input.KubectlVersion + } + return a.platformProvider.KubectlVersion +} + +func (a *applier) findKubectl(ctx context.Context, version string) (*Kubectl, error) { + path, installed, err := toolregistry.DefaultRegistry().Kubectl(ctx, version) + if err != nil { + return nil, fmt.Errorf("no kubectl %s (%v)", version, err) + } + if installed { + a.logger.Info(fmt.Sprintf("kubectl %s has just been installed because of no pre-installed binary for that version", version)) + } + return NewKubectl(version, path), nil +} + +type multiApplier struct { + appliers []Applier +} + +// NewMultiApplier creates an applier that duplicates its operations to all the provided appliers. 
+func NewMultiApplier(appliers ...Applier) Applier { + return &multiApplier{ + appliers: appliers, + } +} + +func (a *multiApplier) ApplyManifest(ctx context.Context, manifest Manifest) error { + for _, a := range a.appliers { + if err := a.ApplyManifest(ctx, manifest); err != nil { + return err + } + } + return nil +} + +func (a *multiApplier) CreateManifest(ctx context.Context, manifest Manifest) error { + for _, a := range a.appliers { + if err := a.CreateManifest(ctx, manifest); err != nil { + return err + } + } + return nil +} + +func (a *multiApplier) ReplaceManifest(ctx context.Context, manifest Manifest) error { + for _, a := range a.appliers { + if err := a.ReplaceManifest(ctx, manifest); err != nil { + return err + } + } + return nil +} + +func (a *multiApplier) Delete(ctx context.Context, key ResourceKey) error { + for _, a := range a.appliers { + if err := a.Delete(ctx, key); err != nil { + return err + } + } + return nil +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/cache.go b/pkg/app/pipedv1/platformprovider/kubernetes/cache.go new file mode 100644 index 0000000000..d847a9c271 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/cache.go @@ -0,0 +1,68 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "errors" + "fmt" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/cache" +) + +type AppManifestsCache struct { + AppID string + Cache cache.Cache + Logger *zap.Logger +} + +func (c AppManifestsCache) Get(commit string) ([]Manifest, bool) { + key := appManifestsCacheKey(c.AppID, commit) + item, err := c.Cache.Get(key) + if err == nil { + return item.([]Manifest), true + } + + if errors.Is(err, cache.ErrNotFound) { + c.Logger.Info("app manifests were not found in cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + ) + return nil, false + } + + c.Logger.Error("failed while retrieving app manifests from cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + zap.Error(err), + ) + return nil, false +} + +func (c AppManifestsCache) Put(commit string, manifests []Manifest) { + key := appManifestsCacheKey(c.AppID, commit) + if err := c.Cache.Put(key, manifests); err != nil { + c.Logger.Error("failed while putting app manifests from cache", + zap.String("app-id", c.AppID), + zap.String("commit-hash", commit), + zap.Error(err), + ) + } +} + +func appManifestsCacheKey(appID, commit string) string { + return fmt.Sprintf("%s/%s", appID, commit) +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/deployment.go b/pkg/app/pipedv1/platformprovider/kubernetes/deployment.go new file mode 100644 index 0000000000..3d6cac50ab --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/deployment.go @@ -0,0 +1,110 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "sort" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func FindReferencingConfigMapsInDeployment(d *appsv1.Deployment) []string { + m := make(map[string]struct{}, 0) + + // Find all configmaps specified in Volumes. + for _, v := range d.Spec.Template.Spec.Volumes { + if cm := v.ConfigMap; cm != nil { + m[cm.Name] = struct{}{} + } + } + + findInContainers := func(containers []corev1.Container) { + for _, c := range containers { + for _, env := range c.Env { + if source := env.ValueFrom; source != nil { + if ref := source.ConfigMapKeyRef; ref != nil { + m[ref.Name] = struct{}{} + } + } + } + for _, env := range c.EnvFrom { + if ref := env.ConfigMapRef; ref != nil { + m[ref.Name] = struct{}{} + } + } + } + } + + // Find all configmaps specified in Env. + findInContainers(d.Spec.Template.Spec.Containers) + findInContainers(d.Spec.Template.Spec.InitContainers) + + if len(m) == 0 { + return nil + } + + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + sort.Strings(out) + + return out +} + +func FindReferencingSecretsInDeployment(d *appsv1.Deployment) []string { + m := make(map[string]struct{}, 0) + + // Find all secrets specified in Volumes. 
+ for _, v := range d.Spec.Template.Spec.Volumes { + if s := v.Secret; s != nil { + m[s.SecretName] = struct{}{} + } + } + + findInContainers := func(containers []corev1.Container) { + for _, c := range containers { + for _, env := range c.Env { + if source := env.ValueFrom; source != nil { + if ref := source.SecretKeyRef; ref != nil { + m[ref.Name] = struct{}{} + } + } + } + for _, env := range c.EnvFrom { + if ref := env.SecretRef; ref != nil { + m[ref.Name] = struct{}{} + } + } + } + } + + // Find all secrets specified in Env. + findInContainers(d.Spec.Template.Spec.Containers) + findInContainers(d.Spec.Template.Spec.InitContainers) + + if len(m) == 0 { + return nil + } + + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + sort.Strings(out) + + return out +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/deployment_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/deployment_test.go new file mode 100644 index 0000000000..1dc6664f7e --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/deployment_test.go @@ -0,0 +1,358 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindReferencingConfigMapsInDeployment(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifest string + expected []string + }{ + { + name: "no configmap", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 +`, + expected: nil, + }, + { + name: "one configmap", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + configMap: + name: canary-by-config-change +`, + expected: []string{ + "canary-by-config-change", + }, + }, + { + name: "multiple configmaps", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + initContainers: + - name: init + image: 
gcr.io/pipecd/helloworld:v0.5.0 + env: + - name: env1 + valueFrom: + configMapKeyRef: + name: init-configmap-1 + key: key1 + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + env: + - name: env1 + valueFrom: + configMapKeyRef: + name: configmap-1 + key: key1 + - name: env2 + valueFrom: + configMapKeyRef: + name: configmap-2 + key: key2 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + configMap: + name: canary-by-config-change + - name: config2 + configMap: + name: configmap-2 +`, + expected: []string{ + "canary-by-config-change", + "configmap-1", + "configmap-2", + "init-configmap-1", + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := ParseManifests(tc.manifest) + require.NoError(t, err) + require.Equal(t, 1, len(manifests)) + + d := &appsv1.Deployment{} + err = manifests[0].ConvertToStructuredObject(d) + require.NoError(t, err) + + out := FindReferencingConfigMapsInDeployment(d) + assert.Equal(t, tc.expected, out) + }) + } +} + +func TestFindReferencingSecretsInDeployment(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifest string + expected []string + }{ + { + name: "no secret", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 +`, + expected: nil, + }, + { + name: "one secret", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + 
selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + secret: + secretName: canary-by-config-change +`, + expected: []string{ + "canary-by-config-change", + }, + }, + { + name: "multiple secrets", + manifest: ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary-by-config-change + labels: + app: canary-by-config-change +spec: + replicas: 2 + selector: + matchLabels: + app: canary-by-config-change + pipecd.dev/variant: primary + template: + metadata: + labels: + app: canary-by-config-change + pipecd.dev/variant: primary + spec: + initContainers: + - name: init + image: gcr.io/pipecd/helloworld:v0.5.0 + env: + - name: env1 + valueFrom: + secretKeyRef: + name: init-secret-1 + key: key1 + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.5.0 + args: + - server + ports: + - containerPort: 9085 + env: + - name: env1 + valueFrom: + secretKeyRef: + name: secret-1 + key: key1 + - name: env2 + valueFrom: + secretKeyRef: + name: secret-2 + key: key2 + volumeMounts: + - name: config + mountPath: /etc/pipecd-config + readOnly: true + volumes: + - name: config + secret: + secretName: canary-by-config-change + - name: config2 + secret: + secretName: secret-2 +`, + expected: []string{ + "canary-by-config-change", + "init-secret-1", + "secret-1", + "secret-2", + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := ParseManifests(tc.manifest) + require.NoError(t, err) + require.Equal(t, 1, len(manifests)) + + d := &appsv1.Deployment{} + err = manifests[0].ConvertToStructuredObject(d) + require.NoError(t, err) + + out := 
FindReferencingSecretsInDeployment(d) + assert.Equal(t, tc.expected, out) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/diff.go b/pkg/app/pipedv1/platformprovider/kubernetes/diff.go new file mode 100644 index 0000000000..7c868cfbd2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/diff.go @@ -0,0 +1,298 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "sort" + "strings" + + "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/pipe-cd/pipecd/pkg/diff" +) + +const ( + diffCommand = "diff" +) + +type DiffListResult struct { + Adds []Manifest + Deletes []Manifest + Changes []DiffListChange +} + +func (r *DiffListResult) NoChange() bool { + return len(r.Adds)+len(r.Deletes)+len(r.Changes) == 0 +} + +type DiffListChange struct { + Old Manifest + New Manifest + Diff *diff.Result +} + +func Diff(old, new Manifest, logger *zap.Logger, opts ...diff.Option) (*diff.Result, error) { + if old.Key.IsSecret() && new.Key.IsSecret() { + var err error + old.u, err = normalizeNewSecret(old.u, new.u) + if err != nil { + return nil, err + } + } + + key := old.Key.String() + normalized, err := remarshal(new.u) + if err != nil { + logger.Info("compare manifests directly since it was unable to remarshal Kubernetes manifest to normalize 
special fields", zap.Error(err)) + return diff.DiffUnstructureds(*old.u, *new.u, key, opts...) + } + + return diff.DiffUnstructureds(*old.u, *normalized, key, opts...) +} + +func DiffList(olds, news []Manifest, logger *zap.Logger, opts ...diff.Option) (*DiffListResult, error) { + adds, deletes, newChanges, oldChanges := groupManifests(olds, news) + cr := &DiffListResult{ + Adds: adds, + Deletes: deletes, + Changes: make([]DiffListChange, 0, len(newChanges)), + } + + for i := 0; i < len(newChanges); i++ { + result, err := Diff(oldChanges[i], newChanges[i], logger, opts...) + if err != nil { + return nil, err + } + if !result.HasDiff() { + continue + } + cr.Changes = append(cr.Changes, DiffListChange{ + Old: oldChanges[i], + New: newChanges[i], + Diff: result, + }) + } + + return cr, nil +} + +func normalizeNewSecret(old, new *unstructured.Unstructured) (*unstructured.Unstructured, error) { + var o, n v1.Secret + runtime.DefaultUnstructuredConverter.FromUnstructured(old.Object, &o) + runtime.DefaultUnstructuredConverter.FromUnstructured(new.Object, &n) + + // Move as much as possible fields from `o.Data` to `o.StringData` to make `o` close to `n` to minimize the diff. + for k, v := range o.Data { + // Skip if the field also exists in StringData. + if _, ok := o.StringData[k]; ok { + continue + } + + if _, ok := n.StringData[k]; !ok { + continue + } + + if o.StringData == nil { + o.StringData = make(map[string]string) + } + + // If the field is existing in `n.StringData`, we should move that field from `o.Data` to `o.StringData` + o.StringData[k] = string(v) + delete(o.Data, k) + } + + newO, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&o) + if err != nil { + return nil, err + } + + return &unstructured.Unstructured{Object: newO}, nil +} + +type DiffRenderOptions struct { + MaskSecret bool + MaskConfigMap bool + // Maximum number of changed manifests should be shown. + // Zero means rendering all. 
+	MaxChangedManifests int
+	// If true, use "diff" command to render.
+	UseDiffCommand bool
+}
+
+// Render returns a human-readable description of the diff result: deleted
+// and added manifests are listed first, then up to opt.MaxChangedManifests
+// rendered diffs of the changed manifests.
+func (r *DiffListResult) Render(opt DiffRenderOptions) string {
+	var b strings.Builder
+	index := 0
+	// Fix: the loop variable was named `delete`, shadowing the builtin.
+	for _, del := range r.Deletes {
+		index++
+		b.WriteString(fmt.Sprintf("- %d. %s\n\n", index, del.Key.ReadableString()))
+	}
+	for _, add := range r.Adds {
+		index++
+		b.WriteString(fmt.Sprintf("+ %d. %s\n\n", index, add.Key.ReadableString()))
+	}
+
+	maxPrintDiffs := len(r.Changes)
+	if opt.MaxChangedManifests != 0 && opt.MaxChangedManifests < maxPrintDiffs {
+		maxPrintDiffs = opt.MaxChangedManifests
+	}
+
+	prints := 0
+	for _, change := range r.Changes {
+		key := change.Old.Key
+		opts := []diff.RenderOption{
+			diff.WithLeftPadding(1),
+		}
+
+		needMaskValue := false
+		if opt.MaskSecret && key.IsSecret() {
+			opts = append(opts, diff.WithMaskPath("data"))
+			needMaskValue = true
+		} else if opt.MaskConfigMap && key.IsConfigMap() {
+			opts = append(opts, diff.WithMaskPath("data"))
+			needMaskValue = true
+		}
+		renderer := diff.NewRenderer(opts...)
+
+		index++
+		b.WriteString(fmt.Sprintf("# %d. %s\n\n", index, key.ReadableString()))
+
+		// Use our diff check in one of the following cases:
+		// - not explicit set useDiffCommand option.
+		// - requires masking secret or configmap value.
+		if !opt.UseDiffCommand || needMaskValue {
+			b.WriteString(renderer.Render(change.Diff.Nodes()))
+		} else {
+			// TODO: Find a way to mask values in case of using unix `diff` command.
+			d, err := diffByCommand(diffCommand, change.Old, change.New)
+			if err != nil {
+				b.WriteString(fmt.Sprintf("An error occurred while rendering diff (%v)", err))
+			} else {
+				b.Write(d)
+			}
+		}
+		b.WriteString("\n")
+
+		prints++
+		if prints >= maxPrintDiffs {
+			break
+		}
+	}
+
+	if prints < len(r.Changes) {
+		// Fix: the closing parenthesis was missing from this message.
+		b.WriteString(fmt.Sprintf("... (omitted %d other changed manifests)\n", len(r.Changes)-prints))
+	}
+
+	return b.String()
+}
+
+// diffByCommand renders the diff between the two given manifests by writing
+// them to temp files and running the given external command (e.g. unix `diff`)
+// in unified format.
+func diffByCommand(command string, old, new Manifest) ([]byte, error) {
+	oldBytes, err := old.YamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	newBytes, err := new.YamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	oldFile, err := os.CreateTemp("", "old")
+	if err != nil {
+		return nil, err
+	}
+	defer os.Remove(oldFile.Name())
+	_, err = oldFile.Write(oldBytes)
+	// Fix: close the temp files explicitly instead of leaking the
+	// descriptors until GC; flush errors surface through Close.
+	if cerr := oldFile.Close(); err == nil {
+		err = cerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	newFile, err := os.CreateTemp("", "new")
+	if err != nil {
+		return nil, err
+	}
+	defer os.Remove(newFile.Name())
+	_, err = newFile.Write(newBytes)
+	if cerr := newFile.Close(); err == nil {
+		err = cerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var stdout, stderr bytes.Buffer
+	cmd := exec.Command(command, "-u", "-N", oldFile.Name(), newFile.Name())
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	err = cmd.Run()
+	if stdout.Len() > 0 {
+		// diff exits with a non-zero status when the files don't match.
+		// Ignore that failure as long as we get output.
+		err = nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to run diff, err = %w, %s", err, stderr.String())
+	}
+
+	// Remove two-line header from output.
+	data := bytes.TrimSpace(stdout.Bytes())
+	rows := bytes.SplitN(data, []byte("\n"), 3)
+	if len(rows) == 3 {
+		return rows[2], nil
+	}
+	return data, nil
+}
+
+func groupManifests(olds, news []Manifest) (adds, deletes, newChanges, oldChanges []Manifest) {
+	// Sort the manifests before comparing.
+ sort.Slice(news, func(i, j int) bool { + return news[i].Key.IsLessWithIgnoringNamespace(news[j].Key) + }) + sort.Slice(olds, func(i, j int) bool { + return olds[i].Key.IsLessWithIgnoringNamespace(olds[j].Key) + }) + + var n, o int + for { + if n >= len(news) || o >= len(olds) { + break + } + if news[n].Key.IsEqualWithIgnoringNamespace(olds[o].Key) { + newChanges = append(newChanges, news[n]) + oldChanges = append(oldChanges, olds[o]) + n++ + o++ + continue + } + // Has in news but not in olds so this should be a added one. + if news[n].Key.IsLessWithIgnoringNamespace(olds[o].Key) { + adds = append(adds, news[n]) + n++ + continue + } + // Has in olds but not in news so this should be an deleted one. + deletes = append(deletes, olds[o]) + o++ + } + + if len(news) > n { + adds = append(adds, news[n:]...) + } + if len(olds) > o { + deletes = append(deletes, olds[o:]...) + } + return +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/diff_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/diff_test.go new file mode 100644 index 0000000000..3ba3764ebe --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/diff_test.go @@ -0,0 +1,407 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/diff" +) + +func TestGroupManifests(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + olds []Manifest + news []Manifest + expectedAdds []Manifest + expectedDeletes []Manifest + expectedNewChanges []Manifest + expectedOldChanges []Manifest + }{ + { + name: "empty list", + }, + { + name: "only adds", + news: []Manifest{ + {Key: ResourceKey{Name: "b"}}, + {Key: ResourceKey{Name: "a"}}, + }, + expectedAdds: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + }, + { + name: "only deletes", + olds: []Manifest{ + {Key: ResourceKey{Name: "b"}}, + {Key: ResourceKey{Name: "a"}}, + }, + expectedDeletes: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + }, + { + name: "only inters", + olds: []Manifest{ + {Key: ResourceKey{Name: "b"}}, + {Key: ResourceKey{Name: "a"}}, + }, + news: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + expectedNewChanges: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + expectedOldChanges: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + }, + { + name: "all kinds", + olds: []Manifest{ + {Key: ResourceKey{Name: "b"}}, + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "c"}}, + }, + news: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "d"}}, + {Key: ResourceKey{Name: "b"}}, + }, + expectedAdds: []Manifest{ + {Key: ResourceKey{Name: "d"}}, + }, + expectedDeletes: []Manifest{ + {Key: ResourceKey{Name: "c"}}, + }, + expectedNewChanges: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + expectedOldChanges: []Manifest{ + {Key: ResourceKey{Name: "a"}}, + {Key: ResourceKey{Name: "b"}}, + }, + }, + } + + for _, tc := 
range testcases { + t.Run(tc.name, func(t *testing.T) { + adds, deletes, newChanges, oldChanges := groupManifests(tc.olds, tc.news) + assert.Equal(t, tc.expectedAdds, adds) + assert.Equal(t, tc.expectedDeletes, deletes) + assert.Equal(t, tc.expectedNewChanges, newChanges) + assert.Equal(t, tc.expectedOldChanges, oldChanges) + }) + } +} + +func TestDiffByCommand(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + command string + manifests string + expected string + expectedErr bool + }{ + { + name: "no command", + command: "non-existent-diff", + manifests: "testdata/diff_by_command_no_change.yaml", + expected: "", + expectedErr: true, + }, + { + name: "no diff", + command: diffCommand, + manifests: "testdata/diff_by_command_no_change.yaml", + expected: "", + }, + { + name: "has diff", + command: diffCommand, + manifests: "testdata/diff_by_command.yaml", + expected: `@@ -6,7 +6,7 @@ + pipecd.dev/managed-by: piped + name: simple + spec: +- replicas: 2 ++ replicas: 3 + selector: + matchLabels: + app: simple +@@ -18,6 +18,7 @@ + containers: + - args: + - a ++ - d + - b + - c + image: gcr.io/pipecd/first:v1.0.0 +@@ -26,7 +27,6 @@ + - containerPort: 9085 + - args: + - xx +- - yy + - zz + image: gcr.io/pipecd/second:v1.0.0 + name: second`, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := LoadManifestsFromYAMLFile(tc.manifests) + require.NoError(t, err) + require.Equal(t, 2, len(manifests)) + + got, err := diffByCommand(tc.command, manifests[0], manifests[1]) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tc.expected, string(got)) + }) + } +} + +func TestDiff(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + expected string + diffNum int + falsePositive bool + }{ + { + name: "Secret no diff 1", + manifests: `apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +--- +apiVersion: 
apps/v1 +kind: Secret +metadata: + name: secret-management +`, + expected: "", + diffNum: 0, + }, + { + name: "Secret no diff 2", + manifests: `apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge +stringData: + foo: bar +--- +apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge +stringData: + foo: bar +`, + expected: "", + diffNum: 0, + }, + { + name: "Secret no diff with merge", + manifests: `apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge + foo: YmFy +--- +apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge +stringData: + foo: bar +`, + expected: "", + diffNum: 0, + }, + { + name: "Secret no diff override false-positive", + manifests: `apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge + foo: YmFy +--- +apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge + foo: Zm9v +stringData: + foo: bar +`, + expected: "", + diffNum: 0, + falsePositive: true, + }, + { + name: "Secret has diff", + manifests: `apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + foo: YmFy +--- +apiVersion: apps/v1 +kind: Secret +metadata: + name: secret-management +data: + password: hoge +stringData: + foo: bar +`, + expected: ` #data ++ data: ++ password: hoge + +`, + diffNum: 1, + }, + { + name: "Pod no diff 1", + manifests: `apiVersion: v1 +kind: Pod +metadata: + name: static-web + labels: + role: myrole +spec: + containers: + - name: web + image: nginx + resources: + limits: + memory: "2Gi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: static-web + labels: + role: myrole +spec: + containers: + - name: web + image: nginx + ports: + resources: + limits: + memory: "2Gi" +`, + expected: "", + diffNum: 0, + falsePositive: false, + }, + { + name: "Pod no diff 2", + manifests: `apiVersion: v1 +kind: Pod 
+metadata: + name: static-web + labels: + role: myrole +spec: + containers: + - name: web + image: nginx + resources: + limits: + memory: "1536Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: static-web + labels: + role: myrole +spec: + containers: + - name: web + image: nginx + ports: + resources: + limits: + memory: "1.5Gi" +`, + expected: "", + diffNum: 0, + falsePositive: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := ParseManifests(tc.manifests) + require.NoError(t, err) + require.Equal(t, 2, len(manifests)) + old, new := manifests[0], manifests[1] + + result, err := Diff(old, new, zap.NewNop(), diff.WithEquateEmpty(), diff.WithIgnoreAddingMapKeys(), diff.WithCompareNumberAndNumericString()) + require.NoError(t, err) + + renderer := diff.NewRenderer(diff.WithLeftPadding(1)) + ds := renderer.Render(result.Nodes()) + if tc.falsePositive { + assert.NotEqual(t, tc.diffNum, result.NumNodes()) + assert.NotEqual(t, tc.expected, ds) + } else { + assert.Equal(t, tc.diffNum, result.NumNodes()) + assert.Equal(t, tc.expected, ds) + } + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/diffutil.go b/pkg/app/pipedv1/platformprovider/kubernetes/diffutil.go new file mode 100644 index 0000000000..957747df78 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/diffutil.go @@ -0,0 +1,120 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kubernetes
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+)
+
+// All functions in this file are borrowed from argocd/gitops-engine and modified.
+// All functions except `remarshal` are borrowed from
+// https://github.com/argoproj/gitops-engine/blob/0bc2f8c395f67123156d4ce6b667bf730618307f/pkg/utils/json/json.go
+// and the `remarshal` function is borrowed from
+// https://github.com/argoproj/gitops-engine/blob/b0c5e00ccfa5d1e73087a18dc59e2e4c72f5f175/pkg/diff/diff.go#L685-L723
+
+// removeFields dispatches on the dynamic type of config (map, list, or
+// scalar) and strips from live everything that does not appear in config.
+// https://github.com/ksonnet/ksonnet/blob/master/pkg/kubecfg/diff.go
+func removeFields(config, live interface{}) interface{} {
+	switch c := config.(type) {
+	case map[string]interface{}:
+		l, ok := live.(map[string]interface{})
+		if ok {
+			return removeMapFields(c, l)
+		}
+		return live
+	case []interface{}:
+		l, ok := live.([]interface{})
+		if ok {
+			return removeListFields(c, l)
+		}
+		return live
+	default:
+		return live
+	}
+
+}
+
+// removeMapFields remove all non-existent fields in the live that don't exist in the config
+func removeMapFields(config, live map[string]interface{}) map[string]interface{} {
+	result := map[string]interface{}{}
+	for k, v1 := range config {
+		v2, ok := live[k]
+		if !ok {
+			continue
+		}
+		if v2 != nil {
+			// Recurse so that nested maps/lists are pruned the same way.
+			v2 = removeFields(v1, v2)
+		}
+		result[k] = v2
+	}
+	return result
+}
+
+func removeListFields(config, live []interface{}) []interface{} {
+	// If live is longer than config, then the extra elements at the end of the
+	// list will be returned as-is so they appear in the diff.
+	result := make([]interface{}, 0, len(live))
+	for i, v2 := range live {
+		if len(config) > i {
+			if v2 != nil {
+				v2 = removeFields(config[i], v2)
+			}
+			result = append(result, v2)
+		} else {
+			result = append(result, v2)
+		}
+	}
+	return result
+}
+
+// remarshal checks resource kind and version and re-marshals using the corresponding struct custom marshaller.
+// This ensures that the expected resource state is formatted the same as the actual resource state in kubernetes
+// and allows finding differences between actual and target states more accurately.
+// Remarshalling also strips any type information (e.g. float64 vs. int) from the unstructured
+// object. This is important for diffing since it will cause godiff to report a false difference.
+func remarshal(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return nil, err
+	}
+	item, err := scheme.Scheme.New(obj.GroupVersionKind())
+	if err != nil {
+		// This is common. the scheme is not registered
+		return nil, err
+	}
+	// This will drop any omitempty fields, perform resource conversion etc...
+	unmarshalledObj := reflect.New(reflect.TypeOf(item).Elem()).Interface()
+	// Unmarshal data into unmarshalledObj, but detect if there are any unknown fields that are not
+	// found in the target GVK object.
+	decoder := json.NewDecoder(bytes.NewReader(data))
+	decoder.DisallowUnknownFields()
+	if err := decoder.Decode(&unmarshalledObj); err != nil {
+		// Likely a field present in obj that is not present in the GVK type, or user
+		// may have specified an invalid spec in git, so return original object
+		return nil, err
+	}
+	unstrBody, err := runtime.DefaultUnstructuredConverter.ToUnstructured(unmarshalledObj)
+	if err != nil {
+		return nil, err
+	}
+	// Remove all default values specified by custom formatter (e.g. creationTimestamp)
+	unstrBody = removeMapFields(obj.Object, unstrBody)
+	return &unstructured.Unstructured{Object: unstrBody}, nil
+}
diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/diffutil_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/diffutil_test.go
new file mode 100644
index 0000000000..247500465c
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/kubernetes/diffutil_test.go
@@ -0,0 +1,218 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRemoveMapFields(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + config map[string]interface{} + live map[string]interface{} + expected map[string]interface{} + }{ + { + name: "Empty map", + config: make(map[string]interface{}, 0), + live: make(map[string]interface{}, 0), + expected: make(map[string]interface{}, 0), + }, + { + name: "Not nested 1", + config: map[string]interface{}{ + "key a": "value a", + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": "value b", + }, + expected: map[string]interface{}{ + "key a": "value a", + }, + }, + { + name: "Not nested 2", + config: map[string]interface{}{ + "key a": "value a", + "key b": "value b", + }, + live: map[string]interface{}{ + "key a": "value a", + }, + expected: map[string]interface{}{ + "key a": "value a", + }, + }, + { + name: "Nested live deleted", + config: map[string]interface{}{ + "key a": "value a", + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + }, + }, + { + name: "Nested same", + config: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + }, + { + name: "Nested nested live deleted", + config: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + "nested key b": "nested 
value b", + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": map[string]interface{}{ + "nested key a": "nested value a", + }, + }, + }, + { + name: "Nested array", + config: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + }, + { + name: "Nested array 2", + config: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, 4, + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + }, + { + name: "Nested array remain", + config: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", map[string]interface{}{ + "aa": "aa", + }, + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", map[string]interface{}{ + "aa": "aa", + }, + }, + }, + }, + { + name: "Nested array same", + config: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "a", "b", 3, + }, + }, + live: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "b", "a", 3, + }, + }, + expected: map[string]interface{}{ + "key a": "value a", + "key b": []interface{}{ + "b", "a", 3, + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + removed := removeMapFields(tc.config, tc.live) + assert.Equal(t, tc.expected, removed) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/hasher.go b/pkg/app/pipedv1/platformprovider/kubernetes/hasher.go new file mode 100644 
index 0000000000..1e787e23c0 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/hasher.go @@ -0,0 +1,157 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Copyright 2017 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + + v1 "k8s.io/api/core/v1" +) + +// HashManifests computes the hash of a list of manifests. 
+func HashManifests(manifests []Manifest) (string, error) { + if len(manifests) == 0 { + return "", errors.New("no manifest to hash") + } + + hasher := sha256.New() + for _, m := range manifests { + var encoded string + var err error + + switch { + case m.Key.IsConfigMap(): + obj := &v1.ConfigMap{} + if err := m.ConvertToStructuredObject(obj); err != nil { + return "", err + } + encoded, err = encodeConfigMap(obj) + case m.Key.IsSecret(): + obj := &v1.Secret{} + if err := m.ConvertToStructuredObject(obj); err != nil { + return "", err + } + encoded, err = encodeSecret(obj) + default: + var encodedBytes []byte + encodedBytes, err = m.MarshalJSON() + encoded = string(encodedBytes) + } + + if err != nil { + return "", err + } + if _, err := hasher.Write([]byte(encoded)); err != nil { + return "", err + } + } + + hex := fmt.Sprintf("%x", hasher.Sum(nil)) + return encodeHash(hex) +} + +// Borrowed from https://github.com/kubernetes/kubernetes/blob/ +// ea0764452222146c47ec826977f49d7001b0ea8c/staging/src/k8s.io/kubectl/pkg/util/hash/hash.go +// encodeHash extracts the first 40 bits of the hash from the hex string +// (1 hex char represents 4 bits), and then maps vowels and vowel-like hex +// characters to consonants to prevent bad words from being formed (the theory +// is that no vowels makes it really hard to make bad words). Since the string +// is hex, the only vowels it can contain are 'a' and 'e'. +// We picked some arbitrary consonants to map to from the same character set as GenerateName. +// See: https://github.com/kubernetes/apimachinery/blob/dc1f89aff9a7509782bde3b68824c8043a3e58cc/pkg/util/rand/rand.go#L75 +// If the hex string contains fewer than ten characters, returns an error. 
+func encodeHash(hex string) (string, error) { + if len(hex) < 10 { + return "", errors.New("the hex string must contain at least 10 characters") + } + enc := []rune(hex[:10]) + for i := range enc { + switch enc[i] { + case '0': + enc[i] = 'g' + case '1': + enc[i] = 'h' + case '3': + enc[i] = 'k' + case 'a': + enc[i] = 'm' + case 'e': + enc[i] = 't' + } + } + return string(enc), nil +} + +// Borrowed from https://github.com/kubernetes/kubernetes/blob/ +// ea0764452222146c47ec826977f49d7001b0ea8c/staging/src/k8s.io/kubectl/pkg/util/hash/hash.go +// encodeConfigMap encodes a ConfigMap. +// Data, Kind, and Name are taken into account. +func encodeConfigMap(cm *v1.ConfigMap) (string, error) { + // json.Marshal sorts the keys in a stable order in the encoding + m := map[string]interface{}{ + "kind": "ConfigMap", + "name": cm.Name, + "data": cm.Data, + } + if cm.Immutable != nil { + m["immutable"] = *cm.Immutable + } + if len(cm.BinaryData) > 0 { + m["binaryData"] = cm.BinaryData + } + data, err := json.Marshal(m) + if err != nil { + return "", err + } + return string(data), nil +} + +// Borrowed from https://github.com/kubernetes/kubernetes/blob/ +// ea0764452222146c47ec826977f49d7001b0ea8c/staging/src/k8s.io/kubectl/pkg/util/hash/hash.go +// encodeSecret encodes a Secret. +// Data, Kind, Name, and Type are taken into account. 
+func encodeSecret(sec *v1.Secret) (string, error) { + m := map[string]interface{}{ + "kind": "Secret", + "type": sec.Type, + "name": sec.Name, + "data": sec.Data, + } + if sec.Immutable != nil { + m["immutable"] = *sec.Immutable + } + // json.Marshal sorts the keys in a stable order in the encoding + data, err := json.Marshal(m) + if err != nil { + return "", err + } + return string(data), nil +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/hasher_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/hasher_test.go new file mode 100644 index 0000000000..4213f3fa2d --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/hasher_test.go @@ -0,0 +1,169 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHashManifests(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + manifests string + expected string + expectedError error + }{ + { + name: "no manifests", + expectedError: errors.New("no manifest to hash"), + }, + { + name: "configmap: emptydata", + manifests: ` +apiVersion: v1 +kind: ConfigMap +data: {} +binaryData: {} +`, + expected: "42745tchd9", + }, + { + name: "configmap: one key", + manifests: ` +apiVersion: v1 +kind: ConfigMap +data: + one: "" +binaryData: {} +`, + expected: "9g67k2htb6", + }, + { + name: "configmap: there keys for checking order", + manifests: ` +apiVersion: v1 +kind: ConfigMap +data: + two: "2" + one: "" + three: "3" +binaryData: {} +`, + expected: "f5h7t85m9b", + }, + { + name: "secret: emptydata", + manifests: ` +apiVersion: v1 +kind: Secret +type: my-type +data: {} +`, + expected: "t75bgf6ctb", + }, + { + name: "secret: one key", + manifests: ` +apiVersion: v1 +kind: Secret +type: my-type +data: + "one": "" +`, + expected: "74bd68bm66", + }, + { + name: "secret: there keys for checking order", + manifests: ` +apiVersion: v1 +kind: Secret +type: my-type +data: + two: Mg== + one: "" + three: Mw== +`, + expected: "dgcb6h9tmk", + }, + { + name: "multiple configs", + manifests: ` +apiVersion: v1 +kind: ConfigMap +data: + two: "2" + three: "3" +binaryData: {} +--- +apiVersion: v1 +kind: Secret +type: my-type +data: + one: "" + three: Mw== +`, + expected: "57hhd7795k", + }, + { + name: "not config manifest", + manifests: ` +apiVersion: apps/v1 +kind: Foo +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + component: foo + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + 
ports: + - containerPort: 9085 +`, + expected: "db48kd6689", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + manifests, err := ParseManifests(tc.manifests) + require.NoError(t, err) + + out, err := HashManifests(manifests) + assert.Equal(t, tc.expected, out) + assert.Equal(t, tc.expectedError, err) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/helm.go b/pkg/app/pipedv1/platformprovider/kubernetes/helm.go new file mode 100644 index 0000000000..173b04f18d --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/helm.go @@ -0,0 +1,425 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "bytes" + "context" + "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/chartrepo" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" + "github.com/pipe-cd/pipecd/pkg/config" +) + +var ( + allowedURLSchemes = []string{"http", "https"} +) + +type Helm struct { + version string + execPath string + logger *zap.Logger +} + +func NewHelm(version, path string, logger *zap.Logger) *Helm { + return &Helm{ + version: version, + execPath: path, + logger: logger, + } +} + +func (h *Helm) TemplateLocalChart(ctx context.Context, appName, appDir, namespace, chartPath string, opts *config.InputHelmOptions) (string, error) { + releaseName := appName + if opts != nil && opts.ReleaseName != "" { + releaseName = opts.ReleaseName + } + + args := []string{ + "template", + "--no-hooks", + "--include-crds", + releaseName, + chartPath, + } + + if namespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", namespace)) + } + + if opts != nil { + for k, v := range opts.SetValues { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.ValueFiles { + if err := verifyHelmValueFilePath(appDir, v); err != nil { + h.logger.Error("failed to verify values file path", zap.Error(err)) + return "", err + } + args = append(args, "-f", v) + } + for k, v := range opts.SetFiles { + args = append(args, "--set-file", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.APIVersions { + args = append(args, "--api-versions", v) + } + if opts.KubeVersion != "" { + args = append(args, "--kube-version", opts.KubeVersion) + } + } + + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, h.execPath, args...) 
+ cmd.Dir = appDir + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + h.logger.Info(fmt.Sprintf("start templating a local chart (or cloned remote git chart) for application %s", appName), + zap.Any("args", args), + ) + + if err := cmd.Run(); err != nil { + return stdout.String(), fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil +} + +type helmRemoteGitChart struct { + GitRemote string + Ref string + Path string +} + +func (h *Helm) TemplateRemoteGitChart(ctx context.Context, appName, appDir, namespace string, chart helmRemoteGitChart, gitClient gitClient, opts *config.InputHelmOptions) (string, error) { + // Firstly, we need to download the remote repository. + repoDir, err := os.MkdirTemp("", "helm-remote-chart") + if err != nil { + return "", fmt.Errorf("unable to created temporary directory for storing remote helm chart: %w", err) + } + defer os.RemoveAll(repoDir) + + repo, err := gitClient.Clone(ctx, chart.GitRemote, chart.GitRemote, "", repoDir) + if err != nil { + return "", fmt.Errorf("unable to clone git repository containing remote helm chart: %w", err) + } + + if chart.Ref != "" { + if err := repo.Checkout(ctx, chart.Ref); err != nil { + return "", fmt.Errorf("unable to checkout to specified ref %s: %w", chart.Ref, err) + } + } + chartPath := filepath.Join(repoDir, chart.Path) + + // After that handle it as a local chart. 
+ return h.TemplateLocalChart(ctx, appName, appDir, namespace, chartPath, opts) +} + +type helmRemoteChart struct { + Repository string + Name string + Version string + Insecure bool +} + +func (h *Helm) TemplateRemoteChart(ctx context.Context, appName, appDir, namespace string, chart helmRemoteChart, opts *config.InputHelmOptions) (string, error) { + releaseName := appName + if opts != nil && opts.ReleaseName != "" { + releaseName = opts.ReleaseName + } + + args := []string{ + "template", + "--no-hooks", + "--include-crds", + releaseName, + fmt.Sprintf("%s/%s", chart.Repository, chart.Name), + fmt.Sprintf("--version=%s", chart.Version), + } + + if chart.Insecure { + args = append(args, "--insecure-skip-tls-verify") + } + + if namespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", namespace)) + } + + if opts != nil { + for k, v := range opts.SetValues { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.ValueFiles { + if err := verifyHelmValueFilePath(appDir, v); err != nil { + h.logger.Error("failed to verify values file path", zap.Error(err)) + return "", err + } + args = append(args, "-f", v) + } + for k, v := range opts.SetFiles { + args = append(args, "--set-file", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.APIVersions { + args = append(args, "--api-versions", v) + } + if opts.KubeVersion != "" { + args = append(args, "--kube-version", opts.KubeVersion) + } + } + + h.logger.Info(fmt.Sprintf("start templating a chart from Helm repository for application %s", appName), + zap.Any("args", args), + ) + + executor := func() (string, error) { + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, h.execPath, args...) 
+ cmd.Dir = appDir + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return stdout.String(), fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil + } + + out, err := executor() + if err == nil { + return out, nil + } + + if !strings.Contains(err.Error(), "helm repo update") { + return "", err + } + + // If the error is a "Not Found", we update the repositories and try again. + if e := chartrepo.Update(ctx, toolregistry.DefaultRegistry(), h.logger); e != nil { + h.logger.Error("failed to update Helm chart repositories", zap.Error(e)) + return "", err + } + return executor() +} + +// verifyHelmValueFilePath verifies if the path of the values file references +// a remote URL or inside the path where the application configuration file (i.e. *.pipecd.yaml) is located. +func verifyHelmValueFilePath(appDir, valueFilePath string) error { + url, err := url.Parse(valueFilePath) + if err == nil && url.Scheme != "" { + for _, s := range allowedURLSchemes { + if strings.EqualFold(url.Scheme, s) { + return nil + } + } + + return fmt.Errorf("scheme %s is not allowed to load values file", url.Scheme) + } + + // valueFilePath is a path where non-default Helm values file is located. + if !filepath.IsAbs(valueFilePath) { + valueFilePath = filepath.Join(appDir, valueFilePath) + } + + if isSymlink(valueFilePath) { + if valueFilePath, err = resolveSymlinkToAbsPath(valueFilePath, appDir); err != nil { + return err + } + } + + // If a path outside of appDir is specified as the path for the values file, + // it may indicate that someone trying to illegally read a file as values file that + // exists in the environment where Piped is running. + if !strings.HasPrefix(valueFilePath, appDir) { + return fmt.Errorf("values file %s references outside the application configuration directory", valueFilePath) + } + + return nil +} + +// isSymlink returns the path is whether symbolic link or not. 
+func isSymlink(path string) bool { + lstat, err := os.Lstat(path) + if err != nil { + return false + } + + return lstat.Mode()&os.ModeSymlink == os.ModeSymlink +} + +// resolveSymlinkToAbsPath resolves symbolic link to an absolute path. +func resolveSymlinkToAbsPath(path, absParentDir string) (string, error) { + resolved, err := os.Readlink(path) + if err != nil { + return "", err + } + + if !filepath.IsAbs(resolved) { + resolved = filepath.Join(absParentDir, resolved) + } + + return resolved, nil +} + +func (h *Helm) UpgradeLocalChart(ctx context.Context, appName, appDir, namespace, chartPath string, opts *config.InputHelmOptions) (string, error) { + releaseName := appName + if opts != nil && opts.ReleaseName != "" { + releaseName = opts.ReleaseName + } + + args := []string{ + "upgrade", + "--install", + releaseName, + chartPath, + } + + if namespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", namespace)) + } + + if opts != nil { + for k, v := range opts.SetValues { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.ValueFiles { + if err := verifyHelmValueFilePath(appDir, v); err != nil { + h.logger.Error("failed to verify values file path", zap.Error(err)) + return "", err + } + args = append(args, "-f", v) + } + for k, v := range opts.SetFiles { + args = append(args, "--set-file", fmt.Sprintf("%s=%s", k, v)) + } + } + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, h.execPath, args...) 
+ cmd.Dir = appDir + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + h.logger.Info(fmt.Sprintf("start upgrading a release (or cloned remote git chart) for application %s", appName), + zap.Any("args", args), + ) + + if err := cmd.Run(); err != nil { + return stdout.String(), fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil +} + +func (h *Helm) UpgradeRemoteGitChart(ctx context.Context, appName, appDir, namespace string, chart helmRemoteGitChart, gitClient gitClient, opts *config.InputHelmOptions) (string, error) { + repoDir, err := os.MkdirTemp("", "helm-remote-chart") + if err != nil { + return "", fmt.Errorf("unable to created temporary directory for storing remote helm chart: %w", err) + } + defer os.RemoveAll(repoDir) + + repo, err := gitClient.Clone(ctx, chart.GitRemote, chart.GitRemote, "", repoDir) + if err != nil { + return "", fmt.Errorf("unable to clone git repository containing remote helm chart: %w", err) + } + + if chart.Ref != "" { + if err := repo.Checkout(ctx, chart.Ref); err != nil { + return "", fmt.Errorf("unable to checkout to specified ref %s: %w", chart.Ref, err) + } + } + chartPath := filepath.Join(repoDir, chart.Path) + + // After that handle it as a local chart. 
+ return h.UpgradeLocalChart(ctx, appName, appDir, namespace, chartPath, opts) +} + +func (h *Helm) UpgradeRemoteChart(ctx context.Context, appName, appDir, namespace string, chart helmRemoteChart, gitClient gitClient, opts *config.InputHelmOptions) (string, error) { + releaseName := appName + if opts != nil && opts.ReleaseName != "" { + releaseName = opts.ReleaseName + } + + args := []string{ + "upgrade", + "--install", + releaseName, + fmt.Sprintf("%s/%s", chart.Repository, chart.Name), + fmt.Sprintf("--version=%s", chart.Version), + } + + if chart.Insecure { + args = append(args, "--insecure-skip-tls-verify") + } + + if namespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", namespace)) + } + + if opts != nil { + for k, v := range opts.SetValues { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + for _, v := range opts.ValueFiles { + if err := verifyHelmValueFilePath(appDir, v); err != nil { + h.logger.Error("failed to verify values file path", zap.Error(err)) + return "", err + } + args = append(args, "-f", v) + } + for k, v := range opts.SetFiles { + args = append(args, "--set-file", fmt.Sprintf("%s=%s", k, v)) + } + } + + h.logger.Info(fmt.Sprintf("start upgrading a release from Helm repository for application %s", appName), + zap.Any("args", args), + ) + + executor := func() (string, error) { + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, h.execPath, args...) + cmd.Dir = appDir + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return stdout.String(), fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil + } + + out, err := executor() + if err == nil { + return out, nil + } + + if !strings.Contains(err.Error(), "helm repo update") { + return "", err + } + + // If the error is a "Not Found", we update the repositories and try again. 
+ if e := chartrepo.Update(ctx, toolregistry.DefaultRegistry(), h.logger); e != nil { + h.logger.Error("failed to update Helm chart repositories", zap.Error(e)) + return "", err + } + return executor() + +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/helm_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/helm_test.go new file mode 100644 index 0000000000..cb5981a03e --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/helm_test.go @@ -0,0 +1,168 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" +) + +func TestTemplateLocalChart(t *testing.T) { + t.Parallel() + + var ( + ctx = context.Background() + appName = "testapp" + appDir = "testdata" + chartPath = "testchart" + ) + + // TODO: Preinstall a helm version inside CI runner to avoid installing. 
+ helmPath, _, err := toolregistry.DefaultRegistry().Helm(ctx, "") + require.NoError(t, err) + + helm := NewHelm("", helmPath, zap.NewNop()) + out, err := helm.TemplateLocalChart(ctx, appName, appDir, "", chartPath, nil) + require.NoError(t, err) + + out = strings.TrimPrefix(out, "---") + manifests := strings.Split(out, "---") + assert.Equal(t, 3, len(manifests)) +} + +func TestTemplateLocalChart_WithNamespace(t *testing.T) { + t.Parallel() + + var ( + ctx = context.Background() + appName = "testapp" + appDir = "testdata" + chartPath = "testchart" + namespace = "testnamespace" + ) + + // TODO: Preinstall a helm version inside CI runner to avoid installing. + helmPath, _, err := toolregistry.DefaultRegistry().Helm(ctx, "") + require.NoError(t, err) + + helm := NewHelm("", helmPath, zap.NewNop()) + out, err := helm.TemplateLocalChart(ctx, appName, appDir, namespace, chartPath, nil) + require.NoError(t, err) + + out = strings.TrimPrefix(out, "---") + + manifests, _ := ParseManifests(out) + for _, manifest := range manifests { + metadata, err := manifest.GetNestedMap("metadata") + require.NoError(t, err) + require.Equal(t, namespace, metadata["namespace"]) + } +} + +func TestVerifyHelmValueFilePath(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + appDir string + valueFilePath string + wantErr bool + }{ + { + name: "Values file locates inside the app dir", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "values.yaml", + wantErr: false, + }, + { + name: "Values file locates inside the app dir (with ..)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "../../../testdata/testhelm/appconfdir/values.yaml", + wantErr: false, + }, + { + name: "Values file locates under the app dir", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "dir/values.yaml", + wantErr: false, + }, + { + name: "Values file locates under the app dir (with ..)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: 
"../../../testdata/testhelm/appconfdir/dir/values.yaml", + wantErr: false, + }, + { + name: "arbitrary file locates outside the app dir", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "/etc/hosts", + wantErr: true, + }, + { + name: "arbitrary file locates outside the app dir (with ..)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "../../../../../../../../../../../../etc/hosts", + wantErr: true, + }, + { + name: "Values file locates allowed remote URL (http)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "http://exmaple.com/values.yaml", + wantErr: false, + }, + { + name: "Values file locates allowed remote URL (https)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "https://exmaple.com/values.yaml", + wantErr: false, + }, + { + name: "Values file locates disallowed remote URL (ftp)", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "ftp://exmaple.com/values.yaml", + wantErr: true, + }, + { + name: "Values file is symlink targeting valid values file", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "valid-symlink", + wantErr: false, + }, + { + name: "Values file is symlink targeting invalid values file", + appDir: "testdata/testhelm/appconfdir", + valueFilePath: "invalid-symlink", + wantErr: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + err := verifyHelmValueFilePath(tc.appDir, tc.valueFilePath) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kubectl.go b/pkg/app/pipedv1/platformprovider/kubernetes/kubectl.go new file mode 100644 index 0000000000..08ed066159 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kubectl.go @@ -0,0 +1,246 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "bytes" + "context" + "errors" + "fmt" + "os/exec" + "strings" + + "k8s.io/client-go/rest" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes/kubernetesmetrics" +) + +var ( + errorReplaceNotFound = errors.New("specified resource is not found") + errorNotFoundLiteral = "Error from server (NotFound)" + errResourceAlreadyExists = errors.New("resource already exists") + errAlreadyExistsLiteral = "Error from server (AlreadyExists)" +) + +type Kubectl struct { + version string + execPath string + config *rest.Config +} + +func NewKubectl(version, path string) *Kubectl { + return &Kubectl{ + version: version, + execPath: path, + } +} + +func (c *Kubectl) Apply(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + defer func() { + kubernetesmetrics.IncKubectlCallsCounter( + c.version, + kubernetesmetrics.LabelApplyCommand, + err == nil, + ) + }() + + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 8) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + + args = append(args, "apply") + if annotation := manifest.GetAnnotations()[LabelServerSideApply]; annotation == UseServerSideApply { + args = append(args, "--server-side") + } + args = append(args, "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) 
+ r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to apply: %s (%w)", string(out), err) + } + return nil +} + +func (c *Kubectl) Create(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + defer func() { + kubernetesmetrics.IncKubectlCallsCounter( + c.version, + kubernetesmetrics.LabelCreateCommand, + err == nil, + ) + }() + + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "create", "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) + r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create: %s (%w)", string(out), err) + } + return nil +} + +func (c *Kubectl) Replace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + defer func() { + kubernetesmetrics.IncKubectlCallsCounter( + c.version, + kubernetesmetrics.LabelReplaceCommand, + err == nil, + ) + }() + + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "replace", "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) 
+ r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err == nil { + return nil + } + + if strings.Contains(string(out), errorNotFoundLiteral) { + return errorReplaceNotFound + } + + return fmt.Errorf("failed to replace: %s (%w)", string(out), err) +} + +func (c *Kubectl) Delete(ctx context.Context, kubeconfig, namespace string, r ResourceKey) (err error) { + defer func() { + kubernetesmetrics.IncKubectlCallsCounter( + c.version, + kubernetesmetrics.LabelDeleteCommand, + err == nil, + ) + }() + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "delete", r.Kind, r.Name) + + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + + if strings.Contains(string(out), "(NotFound)") { + return fmt.Errorf("failed to delete: %s, (%w), %v", string(out), ErrNotFound, err) + } + if err != nil { + return fmt.Errorf("failed to delete: %s, %v", string(out), err) + } + return nil +} + +func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r ResourceKey) (m Manifest, err error) { + defer func() { + kubernetesmetrics.IncKubectlCallsCounter( + c.version, + kubernetesmetrics.LabelGetCommand, + err == nil, + ) + }() + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "get", r.Kind, r.Name, "-o", "yaml") + + cmd := exec.CommandContext(ctx, c.execPath, args...) 
+ out, err := cmd.CombinedOutput() + + if strings.Contains(string(out), "(NotFound)") { + return Manifest{}, fmt.Errorf("not found manifest %v, (%w), %v", r, ErrNotFound, err) + } + if err != nil { + return Manifest{}, fmt.Errorf("failed to get: %s, %v", string(out), err) + } + ms, err := ParseManifests(string(out)) + if err != nil { + return Manifest{}, fmt.Errorf("failed to parse manifests %v: %v", r, err) + } + if len(ms) == 0 { + return Manifest{}, fmt.Errorf("not found manifest %v, (%w)", r, ErrNotFound) + } + return ms[0], nil +} + +func (c *Kubectl) CreateNamespace(ctx context.Context, kubeconfig, namespace string) (err error) { + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + args = append(args, "create", "namespace", namespace) + + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + + if strings.Contains(string(out), errAlreadyExistsLiteral) { + return errResourceAlreadyExists + } + if err != nil { + return fmt.Errorf("failed to create namespace: %s, %v", string(out), err) + } + return nil +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes.go b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes.go new file mode 100644 index 0000000000..46995a2500 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes.go @@ -0,0 +1,44 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "errors" +) + +var ( + ErrNotFound = errors.New("not found") +) + +const ( + LabelManagedBy = "pipecd.dev/managed-by" // Always be piped. + LabelPiped = "pipecd.dev/piped" // The id of piped handling this application. + LabelApplication = "pipecd.dev/application" // The application this resource belongs to. + LabelCommitHash = "pipecd.dev/commit-hash" // Hash value of the deployed commit. + LabelResourceKey = "pipecd.dev/resource-key" // The resource key generated by apiVersion, namespace and name. e.g. apps/v1/Deployment/namespace/demo-app + LabelOriginalAPIVersion = "pipecd.dev/original-api-version" // The api version defined in git configuration. e.g. apps/v1 + LabelIgnoreDriftDirection = "pipecd.dev/ignore-drift-detection" // Whether the drift detection should ignore this resource. + LabelSyncReplace = "pipecd.dev/sync-by-replace" // Use replace instead of apply. + LabelServerSideApply = "pipecd.dev/server-side-apply" // Use server side apply instead of client side apply. + AnnotationConfigHash = "pipecd.dev/config-hash" // The hash value of all mouting config resources. + AnnotationOrder = "pipecd.dev/order" // The order number of resource used to sort them before using. + + ManagedByPiped = "piped" + IgnoreDriftDetectionTrue = "true" + UseReplaceEnabled = "enabled" + UseServerSideApply = "true" + + kustomizationFileName = "kustomization.yaml" +) diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes_test.go new file mode 100644 index 0000000000..5002798332 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetes_test.go @@ -0,0 +1,33 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "log" + "os" + "testing" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" +) + +func TestMain(m *testing.M) { + binDir := "/tmp/piped-bin" + if err := toolregistry.InitDefaultRegistry(binDir, zap.NewNop()); err != nil { + log.Fatal(err) + } + os.Exit(m.Run()) +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kubernetesmetrics/metrics.go b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetesmetrics/metrics.go new file mode 100644 index 0000000000..d7575aeba9 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetesmetrics/metrics.go @@ -0,0 +1,81 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kubernetesmetrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	toolKey          = "tool"
+	versionKey       = "version"
+	toolCommandKey   = "command"
+	commandOutputKey = "status"
+)
+
+type Tool string
+
+const (
+	LabelToolKubectl Tool = "kubectl"
+)
+
+type ToolCommand string
+
+const (
+	LabelApplyCommand   ToolCommand = "apply"
+	LabelCreateCommand  ToolCommand = "create"
+	LabelReplaceCommand ToolCommand = "replace"
+	LabelDeleteCommand  ToolCommand = "delete"
+	LabelGetCommand     ToolCommand = "get"
+)
+
+type CommandOutput string
+
+const (
+	LabelOutputSuccess CommandOutput = "success"
+	LabelOutputFailre  CommandOutput = "failure" // NOTE(review): identifier misspells "Failure"; the emitted label value is correct, so renaming the exported const would be a breaking change.
+)
+
+var (
+	toolCallsCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "cloudprovider_kubernetes_tool_calls_total",
+			Help: "Number of calls made to run the tool like kubectl, kustomize.",
+		},
+		[]string{
+			toolKey,
+			versionKey,
+			toolCommandKey,
+			commandOutputKey,
+		},
+	)
+)
+
+func IncKubectlCallsCounter(version string, command ToolCommand, success bool) {
+	status := LabelOutputSuccess
+	if !success {
+		status = LabelOutputFailre
+	}
+	toolCallsCounter.With(prometheus.Labels{
+		toolKey:          string(LabelToolKubectl),
+		versionKey:       version,
+		toolCommandKey:   string(command),
+		commandOutputKey: string(status),
+	}).Inc()
+}
+
+func Register(r prometheus.Registerer) {
+	r.MustRegister(toolCallsCounter)
+}
diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest/kubernetes.mock.go b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest/kubernetes.mock.go
new file mode 100644
index 0000000000..d2d2964f32
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/kubernetes/kubernetestest/kubernetes.mock.go
@@ -0,0 +1,130 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes (interfaces: Applier,Loader)
+
+// Package kubernetestest is a generated GoMock package.
+package kubernetestest + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + kubernetes "github.com/pipe-cd/pipecd/pkg/app/pipedv1/platformprovider/kubernetes" +) + +// MockApplier is a mock of Applier interface. +type MockApplier struct { + ctrl *gomock.Controller + recorder *MockApplierMockRecorder +} + +// MockApplierMockRecorder is the mock recorder for MockApplier. +type MockApplierMockRecorder struct { + mock *MockApplier +} + +// NewMockApplier creates a new mock instance. +func NewMockApplier(ctrl *gomock.Controller) *MockApplier { + mock := &MockApplier{ctrl: ctrl} + mock.recorder = &MockApplierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockApplier) EXPECT() *MockApplierMockRecorder { + return m.recorder +} + +// ApplyManifest mocks base method. +func (m *MockApplier) ApplyManifest(arg0 context.Context, arg1 kubernetes.Manifest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyManifest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyManifest indicates an expected call of ApplyManifest. +func (mr *MockApplierMockRecorder) ApplyManifest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyManifest", reflect.TypeOf((*MockApplier)(nil).ApplyManifest), arg0, arg1) +} + +// CreateManifest mocks base method. +func (m *MockApplier) CreateManifest(arg0 context.Context, arg1 kubernetes.Manifest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateManifest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateManifest indicates an expected call of CreateManifest. 
+func (mr *MockApplierMockRecorder) CreateManifest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManifest", reflect.TypeOf((*MockApplier)(nil).CreateManifest), arg0, arg1) +} + +// Delete mocks base method. +func (m *MockApplier) Delete(arg0 context.Context, arg1 kubernetes.ResourceKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockApplierMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockApplier)(nil).Delete), arg0, arg1) +} + +// ReplaceManifest mocks base method. +func (m *MockApplier) ReplaceManifest(arg0 context.Context, arg1 kubernetes.Manifest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplaceManifest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReplaceManifest indicates an expected call of ReplaceManifest. +func (mr *MockApplierMockRecorder) ReplaceManifest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplaceManifest", reflect.TypeOf((*MockApplier)(nil).ReplaceManifest), arg0, arg1) +} + +// MockLoader is a mock of Loader interface. +type MockLoader struct { + ctrl *gomock.Controller + recorder *MockLoaderMockRecorder +} + +// MockLoaderMockRecorder is the mock recorder for MockLoader. +type MockLoaderMockRecorder struct { + mock *MockLoader +} + +// NewMockLoader creates a new mock instance. +func NewMockLoader(ctrl *gomock.Controller) *MockLoader { + mock := &MockLoader{ctrl: ctrl} + mock.recorder = &MockLoaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockLoader) EXPECT() *MockLoaderMockRecorder { + return m.recorder +} + +// LoadManifests mocks base method. +func (m *MockLoader) LoadManifests(arg0 context.Context) ([]kubernetes.Manifest, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadManifests", arg0) + ret0, _ := ret[0].([]kubernetes.Manifest) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadManifests indicates an expected call of LoadManifests. +func (mr *MockLoaderMockRecorder) LoadManifests(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadManifests", reflect.TypeOf((*MockLoader)(nil).LoadManifests), arg0) +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kustomize.go b/pkg/app/pipedv1/platformprovider/kubernetes/kustomize.go new file mode 100644 index 0000000000..644fb1f55f --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kustomize.go @@ -0,0 +1,67 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "bytes" + "context" + "fmt" + "os/exec" + + "go.uber.org/zap" +) + +type Kustomize struct { + version string + execPath string + logger *zap.Logger +} + +func NewKustomize(version, path string, logger *zap.Logger) *Kustomize { + return &Kustomize{ + version: version, + execPath: path, + logger: logger, + } +} + +func (c *Kustomize) Template(ctx context.Context, appName, appDir string, opts map[string]string) (string, error) { + args := []string{ + "build", + ".", + } + + for k, v := range opts { + args = append(args, fmt.Sprintf("--%s", k)) + if v != "" { + args = append(args, v) + } + } + + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, c.execPath, args...) + cmd.Dir = appDir + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + c.logger.Info(fmt.Sprintf("start templating a Kustomize application %s", appName), + zap.Any("args", args), + ) + + if err := cmd.Run(); err != nil { + return stdout.String(), fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/kustomize_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/kustomize_test.go new file mode 100644 index 0000000000..1c550c25e4 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/kustomize_test.go @@ -0,0 +1,46 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" +) + +func TestKustomizeTemplate(t *testing.T) { + t.Parallel() + + var ( + ctx = context.TODO() + appName = "testapp" + appDir = "testdata/testkustomize" + ) + + kustomizePath, _, err := toolregistry.DefaultRegistry().Kustomize(ctx, "") + require.NoError(t, err) + + kustomize := NewKustomize("", kustomizePath, zap.NewNop()) + out, err := kustomize.Template(ctx, appName, appDir, map[string]string{ + "load_restrictor": "LoadRestrictionsNone", + }) + require.NoError(t, err) + assert.True(t, len(out) > 0) +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/loader.go b/pkg/app/pipedv1/platformprovider/kubernetes/loader.go new file mode 100644 index 0000000000..60d7ef311b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/loader.go @@ -0,0 +1,228 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/toolregistry" + "github.com/pipe-cd/pipecd/pkg/config" + "github.com/pipe-cd/pipecd/pkg/git" +) + +type TemplatingMethod string + +const ( + TemplatingMethodHelm TemplatingMethod = "helm" + TemplatingMethodKustomize TemplatingMethod = "kustomize" + TemplatingMethodNone TemplatingMethod = "none" +) + +type Loader interface { + // LoadManifests renders and loads all manifests for application. + LoadManifests(ctx context.Context) ([]Manifest, error) +} + +type gitClient interface { + Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) +} + +type loader struct { + appName string + appDir string + repoDir string + configFileName string + input config.KubernetesDeploymentInput + gc gitClient + logger *zap.Logger + + templatingMethod TemplatingMethod + kustomize *Kustomize + helm *Helm + initOnce sync.Once + initErr error +} + +func NewLoader( + appName, appDir, repoDir, configFileName string, + input config.KubernetesDeploymentInput, + gc gitClient, + logger *zap.Logger, +) Loader { + + return &loader{ + appName: appName, + appDir: appDir, + repoDir: repoDir, + configFileName: configFileName, + input: input, + gc: gc, + logger: logger.Named("kubernetes-loader"), + } +} + +// LoadManifests renders and loads all manifests for application. +func (l *loader) LoadManifests(ctx context.Context) (manifests []Manifest, err error) { + defer func() { + // Override namespace if set because ParseManifests does not parse it + // if namespace is not explicitly specified in the manifests. 
+ setNamespace(manifests, l.input.Namespace) + sortManifests(manifests) + }() + l.initOnce.Do(func() { + var initErrorHelm, initErrorKustomize error + l.templatingMethod = determineTemplatingMethod(l.input, l.appDir) + if l.templatingMethod != TemplatingMethodNone { + l.helm, initErrorHelm = l.findHelm(ctx, l.input.HelmVersion) + l.kustomize, initErrorKustomize = l.findKustomize(ctx, l.input.KustomizeVersion) + l.initErr = errors.Join(initErrorHelm, initErrorKustomize) + } + }) + if l.initErr != nil { + return nil, l.initErr + } + + switch l.templatingMethod { + case TemplatingMethodHelm: + var data string + switch { + case l.input.HelmChart.GitRemote != "": + chart := helmRemoteGitChart{ + GitRemote: l.input.HelmChart.GitRemote, + Ref: l.input.HelmChart.Ref, + Path: l.input.HelmChart.Path, + } + data, err = l.helm.TemplateRemoteGitChart(ctx, + l.appName, + l.appDir, + l.input.Namespace, + chart, + l.gc, + l.input.HelmOptions) + + case l.input.HelmChart.Repository != "": + chart := helmRemoteChart{ + Repository: l.input.HelmChart.Repository, + Name: l.input.HelmChart.Name, + Version: l.input.HelmChart.Version, + Insecure: l.input.HelmChart.Insecure, + } + data, err = l.helm.TemplateRemoteChart(ctx, + l.appName, + l.appDir, + l.input.Namespace, + chart, + l.input.HelmOptions) + + default: + data, err = l.helm.TemplateLocalChart(ctx, + l.appName, + l.appDir, + l.input.Namespace, + l.input.HelmChart.Path, + l.input.HelmOptions) + } + + if err != nil { + err = fmt.Errorf("unable to run helm template: %w", err) + return + } + manifests, err = ParseManifests(data) + + case TemplatingMethodKustomize: + var data string + data, err = l.kustomize.Template(ctx, l.appName, l.appDir, l.input.KustomizeOptions) + if err != nil { + err = fmt.Errorf("unable to run kustomize template: %w", err) + return + } + manifests, err = ParseManifests(data) + + case TemplatingMethodNone: + manifests, err = LoadPlainYAMLManifests(l.appDir, l.input.Manifests, l.configFileName) + + default: + err 
= fmt.Errorf("unsupport templating method %v", l.templatingMethod) + } + + return +} + +func setNamespace(manifests []Manifest, namespace string) { + if namespace == "" { + return + } + for i := range manifests { + manifests[i].Key.Namespace = namespace + } +} + +func sortManifests(manifests []Manifest) { + if len(manifests) < 2 { + return + } + sort.Slice(manifests, func(i, j int) bool { + iAns := manifests[i].GetAnnotations() + // Ignore the converting error since it is not so much important. + iIndex, _ := strconv.Atoi(iAns[AnnotationOrder]) + + jAns := manifests[j].GetAnnotations() + // Ignore the converting error since it is not so much important. + jIndex, _ := strconv.Atoi(jAns[AnnotationOrder]) + + return iIndex < jIndex + }) +} + +func (l *loader) findKustomize(ctx context.Context, version string) (*Kustomize, error) { + path, installed, err := toolregistry.DefaultRegistry().Kustomize(ctx, version) + if err != nil { + return nil, fmt.Errorf("no kustomize %s (%v)", version, err) + } + if installed { + l.logger.Info(fmt.Sprintf("kustomize %s has just been installed because of no pre-installed binary for that version", version)) + } + return NewKustomize(version, path, l.logger), nil +} + +func (l *loader) findHelm(ctx context.Context, version string) (*Helm, error) { + path, installed, err := toolregistry.DefaultRegistry().Helm(ctx, version) + if err != nil { + return nil, fmt.Errorf("no helm %s (%v)", version, err) + } + if installed { + l.logger.Info(fmt.Sprintf("helm %s has just been installed because of no pre-installed binary for that version", version)) + } + return NewHelm(version, path, l.logger), nil +} + +func determineTemplatingMethod(input config.KubernetesDeploymentInput, appDirPath string) TemplatingMethod { + if input.HelmChart != nil { + return TemplatingMethodHelm + } + if _, err := os.Stat(filepath.Join(appDirPath, kustomizationFileName)); err == nil { + return TemplatingMethodKustomize + } + return TemplatingMethodNone +} diff --git 
a/pkg/app/pipedv1/platformprovider/kubernetes/loader_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/loader_test.go new file mode 100644 index 0000000000..c553206729 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/loader_test.go @@ -0,0 +1,78 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestSortManifests(t *testing.T) { + maker := func(name string, annotations map[string]string) Manifest { + m := Manifest{ + Key: ResourceKey{Name: name}, + u: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + } + m.AddAnnotations(annotations) + return m + } + + testcases := []struct { + name string + manifests []Manifest + want []Manifest + }{ + { + name: "empty", + }, + { + name: "one manifest", + manifests: []Manifest{ + maker("name-1", map[string]string{AnnotationOrder: "0"}), + }, + want: []Manifest{ + maker("name-1", map[string]string{AnnotationOrder: "0"}), + }, + }, + { + name: "multiple manifests", + manifests: []Manifest{ + maker("name-2", map[string]string{AnnotationOrder: "2"}), + maker("name--1", map[string]string{AnnotationOrder: "-1"}), + maker("name-nil", nil), + maker("name-0", map[string]string{AnnotationOrder: "0"}), + maker("name-1", map[string]string{AnnotationOrder: "1"}), + }, + want: []Manifest{ + maker("name--1", 
map[string]string{AnnotationOrder: "-1"}), + maker("name-nil", nil), + maker("name-0", map[string]string{AnnotationOrder: "0"}), + maker("name-1", map[string]string{AnnotationOrder: "1"}), + maker("name-2", map[string]string{AnnotationOrder: "2"}), + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + sortManifests(tc.manifests) + assert.Equal(t, tc.want, tc.manifests) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/manifest.go b/pkg/app/pipedv1/platformprovider/kubernetes/manifest.go new file mode 100644 index 0000000000..f4daf1d9f2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/manifest.go @@ -0,0 +1,249 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +type Manifest struct { + Key ResourceKey + u *unstructured.Unstructured +} + +func MakeManifest(key ResourceKey, u *unstructured.Unstructured) Manifest { + return Manifest{ + Key: key, + u: u, + } +} + +func (m Manifest) Duplicate(name string) Manifest { + u := m.u.DeepCopy() + u.SetName(name) + + key := m.Key + key.Name = name + + return Manifest{ + Key: key, + u: u, + } +} + +func (m Manifest) YamlBytes() ([]byte, error) { + return yaml.Marshal(m.u) +} + +func (m Manifest) MarshalJSON() ([]byte, error) { + return m.u.MarshalJSON() +} + +func (m Manifest) AddAnnotations(annotations map[string]string) { + if len(annotations) == 0 { + return + } + + annos := m.u.GetAnnotations() + if annos == nil { + m.u.SetAnnotations(annotations) + return + } + for k, v := range annotations { + annos[k] = v + } + m.u.SetAnnotations(annos) +} + +func (m Manifest) GetAnnotations() map[string]string { + return m.u.GetAnnotations() +} + +func (m Manifest) GetNestedStringMap(fields ...string) (map[string]string, error) { + sm, _, err := unstructured.NestedStringMap(m.u.Object, fields...) + if err != nil { + return nil, err + } + + return sm, nil +} + +func (m Manifest) GetNestedMap(fields ...string) (map[string]interface{}, error) { + sm, _, err := unstructured.NestedMap(m.u.Object, fields...) + if err != nil { + return nil, err + } + + return sm, nil +} + +// AddStringMapValues adds or overrides the given key-values into the string map +// that can be found at the specified fields. +func (m Manifest) AddStringMapValues(values map[string]string, fields ...string) error { + curMap, _, err := unstructured.NestedStringMap(m.u.Object, fields...) 
+ if err != nil { + return err + } + + if curMap == nil { + return unstructured.SetNestedStringMap(m.u.Object, values, fields...) + } + for k, v := range values { + curMap[k] = v + } + return unstructured.SetNestedStringMap(m.u.Object, curMap, fields...) +} + +func (m Manifest) GetSpec() (interface{}, error) { + spec, ok, err := unstructured.NestedFieldNoCopy(m.u.Object, "spec") + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("spec was not found") + } + return spec, nil +} + +func (m Manifest) SetStructuredSpec(spec interface{}) error { + data, err := yaml.Marshal(spec) + if err != nil { + return err + } + + unstructuredSpec := make(map[string]interface{}) + if err := yaml.Unmarshal(data, &unstructuredSpec); err != nil { + return err + } + + return unstructured.SetNestedField(m.u.Object, unstructuredSpec, "spec") +} + +func (m Manifest) ConvertToStructuredObject(o interface{}) error { + data, err := m.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(data, o) +} + +func ParseFromStructuredObject(s interface{}) (Manifest, error) { + data, err := json.Marshal(s) + if err != nil { + return Manifest{}, err + } + + obj := &unstructured.Unstructured{} + if err := obj.UnmarshalJSON(data); err != nil { + return Manifest{}, err + } + + return Manifest{ + Key: MakeResourceKey(obj), + u: obj, + }, nil +} + +func LoadPlainYAMLManifests(dir string, names []string, configFileName string) ([]Manifest, error) { + // If no name was specified we have to walk the app directory to collect the manifest list. 
+ if len(names) == 0 { + err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + if path == dir { + return nil + } + if f.IsDir() { + return filepath.SkipDir + } + ext := filepath.Ext(f.Name()) + if ext != ".yaml" && ext != ".yml" && ext != ".json" { + return nil + } + if model.IsApplicationConfigFile(f.Name()) { + return nil + } + if f.Name() == configFileName { + return nil + } + names = append(names, f.Name()) + return nil + }) + if err != nil { + return nil, err + } + } + + manifests := make([]Manifest, 0, len(names)) + for _, name := range names { + path := filepath.Join(dir, name) + ms, err := LoadManifestsFromYAMLFile(path) + if err != nil { + return nil, fmt.Errorf("failed to load manifest at %s (%w)", path, err) + } + manifests = append(manifests, ms...) + } + + return manifests, nil +} + +func LoadManifestsFromYAMLFile(path string) ([]Manifest, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return ParseManifests(string(data)) +} + +func ParseManifests(data string) ([]Manifest, error) { + const separator = "\n---" + var ( + parts = strings.Split(data, separator) + manifests = make([]Manifest, 0, len(parts)) + ) + + for i, part := range parts { + // Ignore all the cases where no content between separator. + if len(strings.TrimSpace(part)) == 0 { + continue + } + // Append new line which trim by document separator. 
+ if i != len(parts)-1 { + part += "\n" + } + var obj unstructured.Unstructured + if err := yaml.Unmarshal([]byte(part), &obj); err != nil { + return nil, err + } + if len(obj.Object) == 0 { + continue + } + manifests = append(manifests, Manifest{ + Key: MakeResourceKey(&obj), + u: &obj, + }) + } + return manifests, nil +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/manifest_test.go b/pkg/app/pipedv1/platformprovider/kubernetes/manifest_test.go new file mode 100644 index 0000000000..947f027f82 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/manifest_test.go @@ -0,0 +1,193 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestParseManifests(t *testing.T) { + maker := func(name, kind string, metadata map[string]interface{}) Manifest { + return Manifest{ + Key: ResourceKey{ + APIVersion: "v1", + Kind: kind, + Name: name, + Namespace: "default", + }, + u: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": kind, + "metadata": metadata, + }, + }, + } + } + + testcases := []struct { + name string + manifests string + want []Manifest + }{ + { + name: "empty1", + }, + { + name: "empty2", + manifests: "---", + }, + { + name: "empty3", + manifests: "\n---", + }, + { + name: "empty4", + manifests: "\n---\n", + }, + { + name: "multiple empty manifests", + manifests: "---\n---\n---\n---\n---\n", + }, + { + name: "one manifest", + manifests: `--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: envoy-config + creationTimestamp: "2022-12-09T01:23:45Z" +`, + want: []Manifest{ + maker("envoy-config", "ConfigMap", map[string]interface{}{ + "name": "envoy-config", + "creationTimestamp": "2022-12-09T01:23:45Z", + }), + }, + }, + { + name: "contains new line at the end of file", + manifests: ` +apiVersion: v1 +kind: Kind1 +metadata: + name: config + extra: | + single-new-line +`, + want: []Manifest{ + maker("config", "Kind1", map[string]interface{}{ + "name": "config", + "extra": "single-new-line\n", + }), + }, + }, + { + name: "not contains new line at the end of file", + manifests: ` +apiVersion: v1 +kind: Kind1 +metadata: + name: config + extra: | + no-new-line`, + want: []Manifest{ + maker("config", "Kind1", map[string]interface{}{ + "name": "config", + "extra": "no-new-line", + }), + }, + }, + { + name: "multiple manifests", + manifests: ` +apiVersion: v1 +kind: Kind1 +metadata: + name: config1 + extra: |- + no-new-line +--- +apiVersion: v1 +kind: Kind2 +metadata: 
+ name: config2 + extra: | + single-new-line-1 +--- +apiVersion: v1 +kind: Kind3 +metadata: + name: config3 + extra: | + single-new-line-2 + + +--- +apiVersion: v1 +kind: Kind4 +metadata: + name: config4 + extra: |+ + multiple-new-line-1 + + +--- +apiVersion: v1 +kind: Kind5 +metadata: + name: config5 + extra: |+ + multiple-new-line-2 + + +`, + want: []Manifest{ + maker("config1", "Kind1", map[string]interface{}{ + "name": "config1", + "extra": "no-new-line", + }), + maker("config2", "Kind2", map[string]interface{}{ + "name": "config2", + "extra": "single-new-line-1\n", + }), + maker("config3", "Kind3", map[string]interface{}{ + "name": "config3", + "extra": "single-new-line-2\n", + }), + maker("config4", "Kind4", map[string]interface{}{ + "name": "config4", + "extra": "multiple-new-line-1\n\n\n", + }), + maker("config5", "Kind5", map[string]interface{}{ + "name": "config5", + "extra": "multiple-new-line-2\n\n\n", + }), + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + m, err := ParseManifests(tc.manifests) + require.NoError(t, err) + assert.ElementsMatch(t, m, tc.want) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/resource/deployment.go b/pkg/app/pipedv1/platformprovider/kubernetes/resource/deployment.go new file mode 100644 index 0000000000..5a82fd77ec --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/resource/deployment.go @@ -0,0 +1,24 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resource + +type Deployment struct { + Spec DeploymentSpec +} + +type DeploymentSpec struct { + Replicas int + Template PodTemplateSpec +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/resource/pod.go b/pkg/app/pipedv1/platformprovider/kubernetes/resource/pod.go new file mode 100644 index 0000000000..576bfad0d5 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/resource/pod.go @@ -0,0 +1,57 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +type PodTemplateSpec struct { + Spec PodSpec +} + +type PodSpec struct { + InitContainers []Container + Containers []Container + Volumes []Volume +} + +type Container struct { + Name string + Image string + VolumeMounts []VolumeMount +} + +type Volume struct { + Name string + VolumeSource `json:",inline"` +} + +type VolumeSource struct { + Secret *SecretVolumeSource + ConfigMap *ConfigMapVolumeSource +} + +type SecretVolumeSource struct { + SecretName string +} + +type LocalObjectReference struct { + Name string +} + +type ConfigMapVolumeSource struct { + LocalObjectReference `json:",inline"` +} + +type VolumeMount struct { + Name string +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/resource/statefulset.go b/pkg/app/pipedv1/platformprovider/kubernetes/resource/statefulset.go new file mode 100644 index 0000000000..acdf8dfed4 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/resource/statefulset.go @@ -0,0 +1,24 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package resource
+
+// StatefulSet is a minimal projection of a Kubernetes StatefulSet manifest,
+// carrying only the fields this package needs for resource inspection.
+type StatefulSet struct {
+	// Fix: use StatefulSetSpec (was DeploymentSpec, a copy-paste slip that
+	// left StatefulSetSpec below declared but unused).
+	Spec StatefulSetSpec
+}
+
+// StatefulSetSpec mirrors the subset of appsv1.StatefulSetSpec used here.
+type StatefulSetSpec struct {
+	Replicas int
+	Template PodTemplateSpec
+}
diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/resourcekey.go b/pkg/app/pipedv1/platformprovider/kubernetes/resourcekey.go
new file mode 100644
index 0000000000..7467e9d133
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/kubernetes/resourcekey.go
@@ -0,0 +1,280 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package kubernetes + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +var builtInAPIVersions = map[string]struct{}{ + "admissionregistration.k8s.io/v1": {}, + "admissionregistration.k8s.io/v1beta1": {}, + "apiextensions.k8s.io/v1": {}, + "apiextensions.k8s.io/v1beta1": {}, + "apiregistration.k8s.io/v1": {}, + "apiregistration.k8s.io/v1beta1": {}, + "apps/v1": {}, + "authentication.k8s.io/v1": {}, + "authentication.k8s.io/v1beta1": {}, + "authorization.k8s.io/v1": {}, + "authorization.k8s.io/v1beta1": {}, + "autoscaling/v1": {}, + "autoscaling/v2beta1": {}, + "autoscaling/v2beta2": {}, + "batch/v1": {}, + "batch/v1beta1": {}, + "certificates.k8s.io/v1beta1": {}, + "coordination.k8s.io/v1": {}, + "coordination.k8s.io/v1beta1": {}, + "extensions/v1beta1": {}, + "internal.autoscaling.k8s.io/v1alpha1": {}, + "metrics.k8s.io/v1beta1": {}, + "networking.k8s.io/v1": {}, + "networking.k8s.io/v1beta1": {}, + "node.k8s.io/v1beta1": {}, + "policy/v1": {}, + "policy/v1beta1": {}, + "rbac.authorization.k8s.io/v1": {}, + "rbac.authorization.k8s.io/v1beta1": {}, + "scheduling.k8s.io/v1": {}, + "scheduling.k8s.io/v1beta1": {}, + "storage.k8s.io/v1": {}, + "storage.k8s.io/v1beta1": {}, + "v1": {}, +} + +const ( + KindDeployment = "Deployment" + KindStatefulSet = "StatefulSet" + KindDaemonSet = "DaemonSet" + KindReplicaSet = "ReplicaSet" + KindPod = "Pod" + KindJob = "Job" + KindCronJob = "CronJob" + KindConfigMap = "ConfigMap" + KindSecret = "Secret" + KindPersistentVolume = "PersistentVolume" + KindPersistentVolumeClaim = "PersistentVolumeClaim" + KindService = "Service" + KindIngress = "Ingress" + KindServiceAccount = "ServiceAccount" + KindRole = "Role" + KindRoleBinding = "RoleBinding" + KindClusterRole = "ClusterRole" + KindClusterRoleBinding = "ClusterRoleBinding" + KindNameSpace = "NameSpace" + KindPodDisruptionBudget = "PodDisruptionBudget" + KindCustomResourceDefinition = "CustomResourceDefinition" + + DefaultNamespace = 
"default" +) + +type APIVersionKind struct { + APIVersion string + Kind string +} + +type ResourceKey struct { + APIVersion string + Kind string + Namespace string + Name string +} + +func (k ResourceKey) String() string { + return fmt.Sprintf("%s:%s:%s:%s", k.APIVersion, k.Kind, k.Namespace, k.Name) +} + +func (k ResourceKey) ReadableString() string { + return fmt.Sprintf("name=%q, kind=%q, namespace=%q, apiVersion=%q", k.Name, k.Kind, k.Namespace, k.APIVersion) +} + +func (k ResourceKey) IsZero() bool { + return k.APIVersion == "" && + k.Kind == "" && + k.Namespace == "" && + k.Name == "" +} + +func (k ResourceKey) IsDeployment() bool { + if k.Kind != KindDeployment { + return false + } + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + return true +} + +func (k ResourceKey) IsReplicaSet() bool { + if k.Kind != KindReplicaSet { + return false + } + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + return true +} + +func (k ResourceKey) IsWorkload() bool { + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + + switch k.Kind { + case KindDeployment: + return true + case KindReplicaSet: + return true + case KindDaemonSet: + return true + case KindPod: + return true + } + + return false +} + +func (k ResourceKey) IsService() bool { + if k.Kind != KindService { + return false + } + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + return true +} + +func (k ResourceKey) IsConfigMap() bool { + if k.Kind != KindConfigMap { + return false + } + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + return true +} + +func (k ResourceKey) IsSecret() bool { + if k.Kind != KindSecret { + return false + } + if !IsKubernetesBuiltInResource(k.APIVersion) { + return false + } + return true +} + +// IsLess reports whether the key should sort before the given key. 
+func (k ResourceKey) IsLess(a ResourceKey) bool {
+	if k.APIVersion < a.APIVersion {
+		return true
+	} else if k.APIVersion > a.APIVersion {
+		return false
+	}
+
+	if k.Kind < a.Kind {
+		return true
+	} else if k.Kind > a.Kind {
+		return false
+	}
+
+	if k.Namespace < a.Namespace {
+		return true
+	} else if k.Namespace > a.Namespace {
+		return false
+	}
+
+	if k.Name < a.Name {
+		return true
+	} else if k.Name > a.Name {
+		return false
+	}
+	return false
+}
+
+// IsLessWithIgnoringNamespace reports whether the key should sort before the given key,
+// but this ignores the comparison of the namespace.
+func (k ResourceKey) IsLessWithIgnoringNamespace(a ResourceKey) bool {
+	if k.APIVersion < a.APIVersion {
+		return true
+	} else if k.APIVersion > a.APIVersion {
+		return false
+	}
+
+	if k.Kind < a.Kind {
+		return true
+	} else if k.Kind > a.Kind {
+		return false
+	}
+
+	if k.Name < a.Name {
+		return true
+	} else if k.Name > a.Name {
+		return false
+	}
+	return false
+}
+
+// IsEqualWithIgnoringNamespace checks whether the key is equal to the given key,
+// but this ignores the comparison of the namespace.
+func (k ResourceKey) IsEqualWithIgnoringNamespace(a ResourceKey) bool { + if k.APIVersion != a.APIVersion { + return false + } + if k.Kind != a.Kind { + return false + } + if k.Name != a.Name { + return false + } + return true +} + +func MakeResourceKey(obj *unstructured.Unstructured) ResourceKey { + k := ResourceKey{ + APIVersion: obj.GetAPIVersion(), + Kind: obj.GetKind(), + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } + if k.Namespace == "" { + k.Namespace = DefaultNamespace + } + return k +} + +func DecodeResourceKey(key string) (ResourceKey, error) { + parts := strings.Split(key, ":") + if len(parts) != 4 { + return ResourceKey{}, fmt.Errorf("malformed key") + } + return ResourceKey{ + APIVersion: parts[0], + Kind: parts[1], + Namespace: parts[2], + Name: parts[3], + }, nil +} + +func IsKubernetesBuiltInResource(apiVersion string) bool { + _, ok := builtInAPIVersions[apiVersion] + // TODO: Change the way to detect whether an APIVersion is built-in or not + // rather than depending on this fixed list. + return ok +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/state.go b/pkg/app/pipedv1/platformprovider/kubernetes/state.go new file mode 100644 index 0000000000..22534073e3 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/state.go @@ -0,0 +1,572 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "fmt" + "sort" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/kubernetes/scheme" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func MakeKubernetesResourceState(uid string, key ResourceKey, obj *unstructured.Unstructured, now time.Time) model.KubernetesResourceState { + var ( + owners = obj.GetOwnerReferences() + ownerIDs = make([]string, 0, len(owners)) + creationTime = obj.GetCreationTimestamp() + status, desc = determineResourceHealth(key, obj) + ) + + for _, owner := range owners { + ownerIDs = append(ownerIDs, string(owner.UID)) + } + sort.Strings(ownerIDs) + + state := model.KubernetesResourceState{ + Id: uid, + OwnerIds: ownerIDs, + // TODO: Think about adding more parents by using label selectors + ParentIds: ownerIDs, + Name: key.Name, + ApiVersion: key.APIVersion, + Kind: key.Kind, + Namespace: obj.GetNamespace(), + + HealthStatus: status, + HealthDescription: desc, + + CreatedAt: creationTime.Unix(), + UpdatedAt: now.Unix(), + } + + return state +} + +func determineResourceHealth(key ResourceKey, obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + if !IsKubernetesBuiltInResource(key.APIVersion) { + desc = fmt.Sprintf("\"%s/%s\" was applied successfully but its health status couldn't be determined exactly. 
(Because tracking status for this kind of resource is not supported yet.)", key.APIVersion, key.Kind) + return + } + + switch key.Kind { + case KindDeployment: + return determineDeploymentHealth(obj) + case KindStatefulSet: + return determineStatefulSetHealth(obj) + case KindDaemonSet: + return determineDaemonSetHealth(obj) + case KindReplicaSet: + return determineReplicaSetHealth(obj) + case KindPod: + return determinePodHealth(obj) + case KindJob: + return determineJobHealth(obj) + case KindCronJob: + return determineCronJobHealth(obj) + case KindService: + return determineServiceHealth(obj) + case KindIngress: + return determineIngressHealth(obj) + case KindConfigMap: + return determineConfigMapHealth(obj) + case KindPersistentVolume: + return determinePersistentVolumeHealth(obj) + case KindPersistentVolumeClaim: + return determinePVCHealth(obj) + case KindSecret: + return determineSecretHealth(obj) + case KindServiceAccount: + return determineServiceAccountHealth(obj) + case KindRole: + return determineRoleHealth(obj) + case KindRoleBinding: + return determineRoleBindingHealth(obj) + case KindClusterRole: + return determineClusterRoleHealth(obj) + case KindClusterRoleBinding: + return determineClusterRoleBindingHealth(obj) + case KindNameSpace: + return determineNameSpace(obj) + case KindPodDisruptionBudget: + return determinePodDisruptionBudgetHealth(obj) + default: + desc = "Unimplemented or unknown resource" + return + } +} + +func determineRoleHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineRoleBindingHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func 
determineClusterRoleHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineClusterRoleBindingHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineDeploymentHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + d := &appsv1.Deployment{} + err := scheme.Scheme.Convert(obj, d, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, d, err) + return + } + + status = model.KubernetesResourceState_OTHER + if d.Spec.Paused { + desc = "Deployment is paused" + return + } + + // Referred to: + // https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L75 + if d.Generation > d.Status.ObservedGeneration { + desc = "Waiting for rollout to finish because observed deployment generation less than desired generation" + return + } + // TimedOutReason is added in a deployment when its newest replica set fails to show any progress + // within the given deadline (progressDeadlineSeconds). 
+ const timedOutReason = "ProgressDeadlineExceeded" + var cond *appsv1.DeploymentCondition + for i := range d.Status.Conditions { + c := d.Status.Conditions[i] + if c.Type == appsv1.DeploymentProgressing { + cond = &c + break + } + } + if cond != nil && cond.Reason == timedOutReason { + desc = fmt.Sprintf("Deployment %q exceeded its progress deadline", obj.GetName()) + } + + if d.Spec.Replicas == nil { + desc = "The number of desired replicas is unspecified" + return + } + if d.Status.UpdatedReplicas < *d.Spec.Replicas { + desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be updated", d.Status.UpdatedReplicas, *d.Spec.Replicas) + return + } + if d.Status.UpdatedReplicas < d.Status.Replicas { + desc = fmt.Sprintf("%d old replicas are pending termination", d.Status.Replicas-d.Status.UpdatedReplicas) + return + } + if d.Status.AvailableReplicas < d.Status.Replicas { + desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be available", d.Status.Replicas-d.Status.AvailableReplicas, d.Status.Replicas) + return + } + + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineStatefulSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + s := &appsv1.StatefulSet{} + err := scheme.Scheme.Convert(obj, s, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, s, err) + return + } + + // Referred to: + // https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L130-L149 + status = model.KubernetesResourceState_OTHER + if s.Status.ObservedGeneration == 0 || s.Generation > s.Status.ObservedGeneration { + desc = "Waiting for statefulset spec update to be observed" + return + } + + if s.Spec.Replicas == nil { + desc = "The number of desired replicas is unspecified" + return + } + if 
*s.Spec.Replicas != s.Status.ReadyReplicas { + desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", s.Status.ReadyReplicas, *s.Spec.Replicas) + return + } + + // Check if the partitioned roll out is in progress. + if s.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && s.Spec.UpdateStrategy.RollingUpdate != nil { + if s.Spec.Replicas != nil && s.Spec.UpdateStrategy.RollingUpdate.Partition != nil { + if s.Status.UpdatedReplicas < (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition) { + desc = fmt.Sprintf("Waiting for partitioned roll out to finish because %d out of %d new pods have been updated", + s.Status.UpdatedReplicas, (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition)) + return + } + } + status = model.KubernetesResourceState_HEALTHY + return + } + + if s.Status.UpdateRevision != s.Status.CurrentRevision { + desc = fmt.Sprintf("Waiting for statefulset rolling update to complete %d pods at revision %s", s.Status.UpdatedReplicas, s.Status.UpdateRevision) + return + } + + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineDaemonSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + d := &appsv1.DaemonSet{} + err := scheme.Scheme.Convert(obj, d, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, d, err) + return + } + + // Referred to: + // https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L107-L115 + status = model.KubernetesResourceState_OTHER + if d.Status.ObservedGeneration == 0 || d.Generation > d.Status.ObservedGeneration { + desc = "Waiting for rollout to finish because observed daemon set generation less than desired generation" + return + } + if 
d.Status.UpdatedNumberScheduled < d.Status.DesiredNumberScheduled { + desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d out of %d new pods have been updated", d.Name, d.Status.UpdatedNumberScheduled, d.Status.DesiredNumberScheduled) + return + } + if d.Status.NumberAvailable < d.Status.DesiredNumberScheduled { + desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d of %d updated pods are available", d.Name, d.Status.NumberAvailable, d.Status.DesiredNumberScheduled) + return + } + + if d.Status.NumberMisscheduled > 0 { + desc = fmt.Sprintf("%d nodes that are running the daemon pod, but are not supposed to run the daemon pod", d.Status.NumberMisscheduled) + return + } + if d.Status.NumberUnavailable > 0 { + desc = fmt.Sprintf("%d nodes that should be running the daemon pod and have none of the daemon pod running and available", d.Status.NumberUnavailable) + return + } + + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineReplicaSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + r := &appsv1.ReplicaSet{} + err := scheme.Scheme.Convert(obj, r, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, r, err) + return + } + + status = model.KubernetesResourceState_OTHER + if r.Status.ObservedGeneration == 0 || r.Generation > r.Status.ObservedGeneration { + desc = "Waiting for rollout to finish because observed replica set generation less than desired generation" + return + } + + var cond *appsv1.ReplicaSetCondition + for i := range r.Status.Conditions { + c := r.Status.Conditions[i] + if c.Type == appsv1.ReplicaSetReplicaFailure { + cond = &c + break + } + } + switch { + case cond != nil && cond.Status == corev1.ConditionTrue: + desc = cond.Message + return + case r.Spec.Replicas == nil: + desc = "The number of desired replicas is 
unspecified" + return + case r.Status.AvailableReplicas < *r.Spec.Replicas: + desc = fmt.Sprintf("Waiting for rollout to finish because only %d/%d replicas are available", r.Status.AvailableReplicas, *r.Spec.Replicas) + return + case *r.Spec.Replicas != r.Status.ReadyReplicas: + desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", r.Status.ReadyReplicas, *r.Spec.Replicas) + return + } + + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineCronJobHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineJobHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + job := &batchv1.Job{} + err := scheme.Scheme.Convert(obj, job, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, job, err) + return + } + + var ( + failed bool + completed bool + message string + ) + for _, condition := range job.Status.Conditions { + switch condition.Type { + case batchv1.JobFailed: + failed = true + completed = true + message = condition.Message + case batchv1.JobComplete: + completed = true + message = condition.Message + } + if failed { + break + } + } + + switch { + case !completed: + status = model.KubernetesResourceState_HEALTHY + desc = "Job is in progress" + case failed: + status = model.KubernetesResourceState_OTHER + desc = message + default: + status = model.KubernetesResourceState_HEALTHY + desc = message + } + + return +} + +func determinePodHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + p := &corev1.Pod{} + err := scheme.Scheme.Convert(obj, p, nil) + if err != nil { + status = 
model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, p, err) + return + } + + // Determine based on its container statuses. + if p.Spec.RestartPolicy == corev1.RestartPolicyAlways { + var messages []string + for _, s := range p.Status.ContainerStatuses { + waiting := s.State.Waiting + if waiting == nil { + continue + } + if strings.HasPrefix(waiting.Reason, "Err") || strings.HasSuffix(waiting.Reason, "Error") || strings.HasSuffix(waiting.Reason, "BackOff") { + status = model.KubernetesResourceState_OTHER + messages = append(messages, waiting.Message) + } + } + + if status == model.KubernetesResourceState_OTHER { + desc = strings.Join(messages, ", ") + return + } + } + + // Determine based on its phase. + switch p.Status.Phase { + case corev1.PodRunning, corev1.PodSucceeded: + status = model.KubernetesResourceState_HEALTHY + desc = p.Status.Message + default: + status = model.KubernetesResourceState_OTHER + desc = p.Status.Message + } + return +} + +func determineIngressHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + check := func(ingressList []corev1.LoadBalancerIngress) { + if len(ingressList) == 0 { + status = model.KubernetesResourceState_OTHER + desc = "Ingress points for the load-balancer are in progress" + return + } + status = model.KubernetesResourceState_HEALTHY + } + + v1Ingress := &networkingv1.Ingress{} + err := scheme.Scheme.Convert(obj, v1Ingress, nil) + if err == nil { + check(v1Ingress.Status.LoadBalancer.Ingress) + return + } + + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, v1Ingress, err) + return +} + +func determineServiceHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + s := &corev1.Service{} + err := scheme.Scheme.Convert(obj, s, nil) + if err != nil { + 
status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, s, err) + return + } + + status = model.KubernetesResourceState_HEALTHY + if s.Spec.Type != corev1.ServiceTypeLoadBalancer { + return + } + if len(s.Status.LoadBalancer.Ingress) == 0 { + status = model.KubernetesResourceState_OTHER + desc = "Ingress points for the load-balancer are in progress" + return + } + return +} + +func determineConfigMapHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineSecretHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determinePersistentVolumeHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + pv := &corev1.PersistentVolume{} + err := scheme.Scheme.Convert(obj, pv, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, pv, err) + return + } + + switch pv.Status.Phase { + case corev1.VolumeBound, corev1.VolumeAvailable: + status = model.KubernetesResourceState_HEALTHY + desc = pv.Status.Message + default: + status = model.KubernetesResourceState_OTHER + desc = pv.Status.Message + } + return +} + +func determinePVCHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + pvc := &corev1.PersistentVolumeClaim{} + err := scheme.Scheme.Convert(obj, pvc, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to 
%T: %v", obj, pvc, err) + return + } + switch pvc.Status.Phase { + case corev1.ClaimLost: + status = model.KubernetesResourceState_OTHER + desc = "Lost its underlying PersistentVolume" + case corev1.ClaimPending: + status = model.KubernetesResourceState_OTHER + desc = "Being not yet bound" + case corev1.ClaimBound: + status = model.KubernetesResourceState_HEALTHY + default: + status = model.KubernetesResourceState_OTHER + desc = "The current phase of PersistentVolumeClaim is unexpected" + } + return +} + +func determineServiceAccountHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determinePodDisruptionBudgetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + desc = fmt.Sprintf("%q was applied successfully", obj.GetName()) + status = model.KubernetesResourceState_HEALTHY + return +} + +func determineNameSpace(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) { + ns := &corev1.Namespace{} + err := scheme.Scheme.Convert(obj, ns, nil) + if err != nil { + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, ns, err) + return + } + + switch ns.Status.Phase { + case corev1.NamespaceActive: + // Go to determine based on the status' conditions. 
+ case corev1.NamespaceTerminating: + status = model.KubernetesResourceState_OTHER + desc = "NameSpace is gracefully terminated" + return + default: + status = model.KubernetesResourceState_OTHER + desc = fmt.Sprintf("The NameSpace is at an unexpected phase: %s", ns.Status.Phase) + return + } + + status = model.KubernetesResourceState_HEALTHY + + var cond *corev1.NamespaceCondition + for i := range ns.Status.Conditions { + c := ns.Status.Conditions[i] + switch c.Type { + case corev1.NamespaceDeletionDiscoveryFailure, corev1.NamespaceDeletionContentFailure, corev1.NamespaceDeletionGVParsingFailure: + cond = &c + } + if cond != nil { + break + } + } + + if cond != nil && cond.Status == corev1.ConditionTrue { + status = model.KubernetesResourceState_OTHER + desc = cond.Message + return + } + return +} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command.yaml new file mode 100644 index 0000000000..645055e62d --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: first + image: gcr.io/pipecd/first:v1.0.0 + args: + - a + - b + - c + ports: + - containerPort: 9085 + - name: second + image: gcr.io/pipecd/second:v1.0.0 + args: + - xx + - yy + - zz + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 3 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: first + image: gcr.io/pipecd/first:v1.0.0 + args: + - a + - d + - b + - c + ports: + - containerPort: 9085 + - name: 
second + image: gcr.io/pipecd/second:v1.0.0 + args: + - xx + - zz + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command_no_change.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command_no_change.yaml new file mode 100644 index 0000000000..e5462ba31b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_by_command_no_change.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_missing_fields.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_missing_fields.yaml new file mode 100644 index 0000000000..9edc380f09 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_missing_fields.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary + labels: + app: canary +spec: + replicas: 2 + selector: + matchLabels: + app: canary + template: + metadata: + labels: + app: canary + spec: + containers: + - name: helloworld + image: gcr.io/kapetanios/pipecd-helloworld:v0.0.2-159-g2fde42c + args: + - server + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + 
deployment.kubernetes.io/revision: "1" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"pipecd.dev/application":"7230d36c-dceb-4037-b3c8-94abc57b2eda","pipecd.dev/commit-hash":"ef981187e5817c589617a114d5d5ae36adfbb373","pipecd.dev/managed-by":"piped","pipecd.dev/original-api-version":"apps/v1","pipecd.dev/piped":"70feaff4-a6b7-4d03-b5a9-26b2cbabf77b","pipecd.dev/resource-key":"apps/v1:Deployment:default:canary","pipecd.dev/variant":"primary"},"labels":{"app":"canary"},"name":"canary","namespace":"default"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"canary"}},"template":{"metadata":{"labels":{"app":"canary"}},"spec":{"containers":[{"args":["server"],"image":"gcr.io/kapetanios/pipecd-helloworld:v0.0.2-159-g2fde42c","name":"helloworld","ports":[{"containerPort":9085}]}]}}}} + pipecd.dev/application: 7230d36c-dceb-4037-b3c8-94abc57b2eda + pipecd.dev/commit-hash: ef981187e5817c589617a114d5d5ae36adfbb373 + pipecd.dev/managed-by: piped + pipecd.dev/original-api-version: apps/v1 + pipecd.dev/piped: 70feaff4-a6b7-4d03-b5a9-26b2cbabf77b + pipecd.dev/resource-key: apps/v1:Deployment:default:canary + pipecd.dev/variant: primary + creationTimestamp: "2020-06-18T14:23:30Z" + generation: 2 + labels: + app: canary + name: canary + namespace: default + resourceVersion: "3713438" + selfLink: /apis/apps/v1/namespaces/default/deployments/canary + uid: 00e655f8-0c27-477e-9178-97dab0d91316 +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: canary + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: canary + spec: + containers: + - args: + - server + image: gcr.io/kapetanios/pipecd-helloworld:v0.0.2-159-g2fde42c + imagePullPolicy: IfNotPresent + name: helloworld + ports: + - containerPort: 9085 + protocol: TCP + resources: {} + 
terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: "2020-06-18T14:23:31Z" + lastUpdateTime: "2020-06-18T14:23:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-06-18T14:23:30Z" + lastUpdateTime: "2020-06-18T14:23:31Z" + message: ReplicaSet "canary-78d4c97d9c" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 2 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 \ No newline at end of file diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_order.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_order.yaml new file mode 100644 index 0000000000..82c4487051 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_ignore_order.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hello + - hi + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + pipecd.dev/managed-by: piped + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_multi_diffs.yaml 
b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_multi_diffs.yaml new file mode 100644 index 0000000000..ce4f073fde --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_multi_diffs.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped + change: first +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + pipecd.dev/managed-by: piped + app: simple + change: second +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v2.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_no_diff.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_no_diff.yaml new file mode 100644 index 0000000000..62d9cd9ac2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_no_diff.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple + pipecd.dev/managed-by: piped +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + pipecd.dev/managed-by: piped + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + template: + metadata: + labels: + app: simple + spec: + containers: + - name: helloworld + image: 
gcr.io/pipecd/helloworld:v1.0.0 + args: + - hi + - hello + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_redact.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_redact.yaml new file mode 100644 index 0000000000..73802b9f3a --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/diff_redact.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +metadata: + name: pipecd-secrets + namespace: default +kind: Secret +type: Opaque +data: + service-account.json: real-secret-data-1 +--- +apiVersion: v1 +metadata: + name: pipecd-secrets + namespace: default +kind: Secret +type: Opaque +data: + service-account.json: real-secret-data-2 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/.helmignore b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/Chart.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/Chart.yaml new file mode 100644 index 0000000000..5bbebd26c2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: testchart +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. 
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/NOTES.txt b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/NOTES.txt new file mode 100644 index 0000000000..9b8fb51f68 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "testchart.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "testchart.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "testchart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "testchart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/_helpers.tpl b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/_helpers.tpl new file mode 100644 index 0000000000..698af2572c --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "testchart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "testchart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "testchart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "testchart.labels" -}} +helm.sh/chart: {{ include "testchart.chart" . }} +{{ include "testchart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "testchart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "testchart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "testchart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "testchart.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/deployment.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/deployment.yaml new file mode 100644 index 0000000000..b9c4cf95df --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . 
| nindent 4 }} + namespace: {{.Release.Namespace}} +spec: +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- include "testchart.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "testchart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "testchart.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/hpa.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/hpa.yaml new file mode 100644 index 0000000000..58c5a47d7e --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "testchart.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/ingress.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/ingress.yaml new file mode 100644 index 0000000000..7c17e022f3 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/ingress.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "testchart.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "testchart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{.Release.Namespace}} +spec: + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/service.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/service.yaml new file mode 100644 index 0000000000..d8c6e26de7 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . | nindent 4 }} + namespace: {{.Release.Namespace}} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "testchart.selectorLabels" . | nindent 4 }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/serviceaccount.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/serviceaccount.yaml new file mode 100644 index 0000000000..4537db7747 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "testchart.serviceAccountName" . }} + namespace: {{.Release.Namespace}} + labels: + {{- include "testchart.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/tests/test-connection.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..94ec750986 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "testchart.fullname" . }}-test-connection" + labels: + {{- include "testchart.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "testchart.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/values.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/values.yaml new file mode 100644 index 0000000000..6c45a41504 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testchart/values.yaml @@ -0,0 +1,79 @@ +# Default values for testchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/app.pipecd.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/app.pipecd.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/dir/values.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/dir/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/invalid-symlink b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/invalid-symlink new file mode 120000 index 0000000000..555dec973e --- 
/dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/invalid-symlink @@ -0,0 +1 @@ +/etc/hosts \ No newline at end of file diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/valid-symlink b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/valid-symlink new file mode 120000 index 0000000000..a53324e8c5 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/valid-symlink @@ -0,0 +1 @@ +dir/values.yaml \ No newline at end of file diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/values.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/appconfdir/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/values.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testhelm/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/deployment.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/deployment.yaml new file mode 100644 index 0000000000..1360acf696 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: the-deployment +spec: + replicas: 3 + selector: + matchLabels: + deployment: hello + template: + metadata: + labels: + deployment: hello + spec: + containers: + - name: the-container + image: monopole/hello:1 + command: ["/hello", + "--port=8080", + "--enableRiskyFeature=$(ENABLE_RISKY)"] + ports: + - containerPort: 8080 + env: + - name: ALT_GREETING + valueFrom: + configMapKeyRef: + name: the-map + key: altGreeting + - name: ENABLE_RISKY + valueFrom: + configMapKeyRef: + name: the-map + key: enableRisky \ No newline at end of file diff --git 
a/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/kustomization.yaml b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/kustomization.yaml new file mode 100644 index 0000000000..c7cf5bb89a --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/kubernetes/testdata/testkustomize/kustomization.yaml @@ -0,0 +1,5 @@ +commonLabels: + app: hello + +resources: + - deployment.yaml \ No newline at end of file diff --git a/pkg/app/pipedv1/platformprovider/lambda/client.go b/pkg/app/pipedv1/platformprovider/lambda/client.go new file mode 100644 index 0000000000..a70bf89a92 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/lambda/client.go @@ -0,0 +1,561 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/lambda" + "github.com/aws/aws-sdk-go-v2/service/lambda/types" + "go.uber.org/zap" + + "github.com/pipe-cd/pipecd/pkg/backoff" +) + +const ( + defaultAliasName = "Service" + // RequestRetryTime represents the number of times calling to AWS resource control. + RequestRetryTime = 3 + // RetryIntervalDuration represents duration time between retry. + RetryIntervalDuration = 1 * time.Minute +) + +// ErrNotFound lambda resource occurred. 
+var ErrNotFound = errors.New("lambda resource not found") + +type client struct { + client *lambda.Client + logger *zap.Logger +} + +func newClient(region, profile, credentialsFile, roleARN, tokenPath string, logger *zap.Logger) (*client, error) { + if region == "" { + return nil, fmt.Errorf("region is required field") + } + + c := &client{ + logger: logger.Named("lambda"), + } + + optFns := []func(*config.LoadOptions) error{config.WithRegion(region)} + if credentialsFile != "" { + optFns = append(optFns, config.WithSharedCredentialsFiles([]string{credentialsFile})) + } + if profile != "" { + optFns = append(optFns, config.WithSharedConfigProfile(profile)) + } + if tokenPath != "" && roleARN != "" { + optFns = append(optFns, config.WithWebIdentityRoleCredentialOptions(func(v *stscreds.WebIdentityRoleOptions) { + v.RoleARN = roleARN + v.TokenRetriever = stscreds.IdentityTokenFile(tokenPath) + })) + } + + // When you initialize an aws.Config instance using config.LoadDefaultConfig, the SDK uses its default credential chain to find AWS credentials. + // This default credential chain looks for credentials in the following order: + // + // 1. Environment variables. + // 1. Static Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN) + // 2. Web Identity Token (AWS_WEB_IDENTITY_TOKEN_FILE) + // 2. Shared configuration files. + // 1. SDK defaults to credentials file under .aws folder that is placed in the home folder on your computer. + // 2. SDK defaults to config file under .aws folder that is placed in the home folder on your computer. + // 3. If your application uses an ECS task definition or RunTask API operation, IAM role for tasks. + // 4. If your application is running on an Amazon EC2 instance, IAM role for Amazon EC2. + // ref: https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials + cfg, err := config.LoadDefaultConfig(context.Background(), optFns...) 
	if err != nil {
		return nil, fmt.Errorf("failed to load config to create lambda client: %w", err)
	}
	c.client = lambda.NewFromConfig(cfg)

	return c, nil
}

// IsFunctionExist reports whether a Lambda function with the given name
// already exists, by calling GetFunction. A ResourceNotFoundException is
// treated as "does not exist" (false, nil); any other error is returned.
func (c *client) IsFunctionExist(ctx context.Context, name string) (bool, error) {
	input := &lambda.GetFunctionInput{
		FunctionName: aws.String(name),
	}
	_, err := c.client.GetFunction(ctx, input)
	if err != nil {
		var nfe *types.ResourceNotFoundException
		if errors.As(err, &nfe) {
			// Only in case ResourceNotFound error occurred, the FunctionName is available for create so do not raise error.
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// CreateFunction creates a new Lambda function from the given manifest.
// The code source is either a container image (Spec.ImageURI) or a zip
// archive stored on S3 (Spec.S3Bucket/S3Key/S3ObjectVersion).
// NOTE(review): when both ImageURI and S3Bucket are set, the S3 settings
// win because they are applied last — confirm that manifests are expected
// to declare only one code source.
func (c *client) CreateFunction(ctx context.Context, fm FunctionManifest) error {
	input := &lambda.CreateFunctionInput{
		FunctionName: aws.String(fm.Spec.Name),
		Role:         aws.String(fm.Spec.Role),
		MemorySize:   aws.Int32(fm.Spec.Memory),
		Timeout:      aws.Int32(fm.Spec.Timeout),
		Tags:         fm.Spec.Tags,
		Environment: &types.Environment{
			Variables: fm.Spec.Environments,
		},
	}
	if len(fm.Spec.Architectures) != 0 {
		var architectures []types.Architecture
		for _, arch := range fm.Spec.Architectures {
			architectures = append(architectures, types.Architecture(arch.Name))
		}
		input.Architectures = architectures
	}
	if fm.Spec.EphemeralStorage != nil {
		input.EphemeralStorage = &types.EphemeralStorage{
			Size: aws.Int32(fm.Spec.EphemeralStorage.Size),
		}
	}
	if fm.Spec.VPCConfig != nil {
		input.VpcConfig = &types.VpcConfig{
			SecurityGroupIds: fm.Spec.VPCConfig.SecurityGroupIDs,
			SubnetIds:        fm.Spec.VPCConfig.SubnetIDs,
		}
	}
	// Container image packing.
	if fm.Spec.ImageURI != "" {
		input.PackageType = types.PackageTypeImage
		input.Code = &types.FunctionCode{
			ImageUri: aws.String(fm.Spec.ImageURI),
		}
	}
	// Zip packing which stored in s3.
	if fm.Spec.S3Bucket != "" {
		input.PackageType = types.PackageTypeZip
		input.Code = &types.FunctionCode{
			S3Bucket:        aws.String(fm.Spec.S3Bucket),
			S3Key:           aws.String(fm.Spec.S3Key),
			S3ObjectVersion: aws.String(fm.Spec.S3ObjectVersion),
		}
		input.Handler = aws.String(fm.Spec.Handler)
		input.Runtime = types.Runtime(fm.Spec.Runtime)
	}
	_, err := c.client.CreateFunction(ctx, input)
	if err != nil {
		return fmt.Errorf("failed to create Lambda function %s: %w", fm.Spec.Name, err)
	}
	return nil
}

// CreateFunctionFromSource creates a new Lambda function whose code is a zip
// archive read entirely from the given reader and uploaded inline
// (FunctionCode.ZipFile), as opposed to S3- or image-based packaging.
func (c *client) CreateFunctionFromSource(ctx context.Context, fm FunctionManifest, zip io.Reader) error {
	data, err := io.ReadAll(zip)
	if err != nil {
		return err
	}

	input := &lambda.CreateFunctionInput{
		FunctionName: aws.String(fm.Spec.Name),
		PackageType:  types.PackageTypeZip,
		Code: &types.FunctionCode{
			ZipFile: data,
		},
		Handler:    aws.String(fm.Spec.Handler),
		Runtime:    types.Runtime(fm.Spec.Runtime),
		Role:       aws.String(fm.Spec.Role),
		MemorySize: aws.Int32(fm.Spec.Memory),
		Timeout:    aws.Int32(fm.Spec.Timeout),
		Tags:       fm.Spec.Tags,
		Environment: &types.Environment{
			Variables: fm.Spec.Environments,
		},
	}
	if len(fm.Spec.Architectures) != 0 {
		architectures := make([]types.Architecture, 0, len(fm.Spec.Architectures))
		for _, arch := range fm.Spec.Architectures {
			architectures = append(architectures, types.Architecture(arch.Name))
		}
		input.Architectures = architectures
	}
	if fm.Spec.EphemeralStorage != nil {
		input.EphemeralStorage = &types.EphemeralStorage{
			Size: aws.Int32(fm.Spec.EphemeralStorage.Size),
		}
	}
	if fm.Spec.VPCConfig != nil {
		input.VpcConfig = &types.VpcConfig{
			SecurityGroupIds: fm.Spec.VPCConfig.SecurityGroupIDs,
			SubnetIds:        fm.Spec.VPCConfig.SubnetIDs,
		}
	}
	_, err = c.client.CreateFunction(ctx, input)
	if err != nil {
		return fmt.Errorf("failed to create Lambda function %s: %w", fm.Spec.Name, err)
	}
	return nil
}

// UpdateFunction updates an existing function's configuration, then its code
// (from container image or S3 zip), and finally reconciles its tags.
func (c *client) UpdateFunction(ctx context.Context, fm FunctionManifest) error {
	// UpdateFunctionConfiguration must be called before UpdateFunctionCode.
	// Lambda has named by state.
	// If Lambda's state is pending, UpdateFunctionConfiguration is failed. This error is explained as a ResourceConflictException.
	// ref: https://docs.aws.amazon.com/lambda/latest/dg/troubleshooting-invocation.html
	// Update function configuration.
	if err := c.updateFunctionConfiguration(ctx, fm); err != nil {
		return err
	}
	// Update function code.
	codeInput := &lambda.UpdateFunctionCodeInput{
		FunctionName: aws.String(fm.Spec.Name),
	}
	// Container image packing.
	if fm.Spec.ImageURI != "" {
		codeInput.ImageUri = aws.String(fm.Spec.ImageURI)
	}
	// Zip packing which stored in s3.
	if fm.Spec.S3Bucket != "" {
		codeInput.S3Bucket = aws.String(fm.Spec.S3Bucket)
		codeInput.S3Key = aws.String(fm.Spec.S3Key)
		codeInput.S3ObjectVersion = aws.String(fm.Spec.S3ObjectVersion)
	}
	if len(fm.Spec.Architectures) != 0 {
		var architectures []types.Architecture
		for _, arch := range fm.Spec.Architectures {
			architectures = append(architectures, types.Architecture(arch.Name))
		}
		codeInput.Architectures = architectures
	}
	_, err := c.client.UpdateFunctionCode(ctx, codeInput)
	if err != nil {
		return fmt.Errorf("failed to update function code for Lambda function %s: %w", fm.Spec.Name, err)
	}

	// Tag/Untag function if necessary.
	return c.updateTagsConfig(ctx, fm)
}

// UpdateFunctionFromSource updates the function's configuration, then uploads
// the zip archive read from the given reader as the new function code, and
// finally reconciles the function's tags.
func (c *client) UpdateFunctionFromSource(ctx context.Context, fm FunctionManifest, zip io.Reader) error {
	// Update function configuration.
	if err := c.updateFunctionConfiguration(ctx, fm); err != nil {
		return err
	}

	data, err := io.ReadAll(zip)
	if err != nil {
		return err
	}

	// Update function code.
	codeInput := &lambda.UpdateFunctionCodeInput{
		FunctionName: aws.String(fm.Spec.Name),
		ZipFile:      data,
	}
	if len(fm.Spec.Architectures) != 0 {
		var architectures []types.Architecture
		for _, arch := range fm.Spec.Architectures {
			architectures = append(architectures, types.Architecture(arch.Name))
		}
		codeInput.Architectures = architectures
	}
	_, err = c.client.UpdateFunctionCode(ctx, codeInput)
	if err != nil {
		return fmt.Errorf("failed to update function code for Lambda function %s: %w", fm.Spec.Name, err)
	}

	// Tag/Untag function if necessary.
	return c.updateTagsConfig(ctx, fm)
}

// updateFunctionConfiguration calls UpdateFunctionConfiguration with a retry
// loop (the call fails with ResourceConflictException while the function is
// in a pending state), then polls GetFunction until LastUpdateStatus becomes
// Successful before returning.
func (c *client) updateFunctionConfiguration(ctx context.Context, fm FunctionManifest) error {
	retry := backoff.NewRetry(RequestRetryTime, backoff.NewConstant(RetryIntervalDuration))
	updateFunctionConfigurationSucceed := false
	var err error
	for retry.WaitNext(ctx) {
		configInput := &lambda.UpdateFunctionConfigurationInput{
			FunctionName: aws.String(fm.Spec.Name),
			Role:         aws.String(fm.Spec.Role),
			MemorySize:   aws.Int32(fm.Spec.Memory),
			Timeout:      aws.Int32(fm.Spec.Timeout),
			Runtime:      types.Runtime(fm.Spec.Runtime),
			Environment: &types.Environment{
				Variables: fm.Spec.Environments,
			},
		}
		// For zip packing Lambda function code, allow update the function handler
		// on update the function's manifest.
		if fm.Spec.Handler != "" {
			configInput.Handler = aws.String(fm.Spec.Handler)
		}
		if fm.Spec.EphemeralStorage != nil {
			configInput.EphemeralStorage = &types.EphemeralStorage{
				Size: aws.Int32(fm.Spec.EphemeralStorage.Size),
			}
		}
		if fm.Spec.VPCConfig != nil {
			configInput.VpcConfig = &types.VpcConfig{
				SecurityGroupIds: fm.Spec.VPCConfig.SecurityGroupIDs,
				SubnetIds:        fm.Spec.VPCConfig.SubnetIDs,
			}
		}
		_, err = c.client.UpdateFunctionConfiguration(ctx, configInput)
		if err != nil {
			// NOTE(review): the underlying error is not attached to this log
			// entry (only the final wrapped error carries it) — consider
			// logging err here for easier debugging of transient failures.
			c.logger.Error("Failed to update function configuration")
		} else {
			updateFunctionConfigurationSucceed = true
			break
		}
	}
	if !updateFunctionConfigurationSucceed {
		return fmt.Errorf("failed to update configuration for Lambda function %s: %w", fm.Spec.Name, err)
	}

	// Wait until function updated successfully.
	retry = backoff.NewRetry(RequestRetryTime, backoff.NewConstant(RetryIntervalDuration))
	input := &lambda.GetFunctionInput{
		FunctionName: aws.String(fm.Spec.Name),
	}
	_, err = retry.Do(ctx, func() (any, error) {
		output, err := c.client.GetFunction(ctx, input)
		if err != nil {
			return nil, err
		}
		if output.Configuration.LastUpdateStatus != types.LastUpdateStatusSuccessful {
			// NOTE(review): LastUpdateStatusReason is dereferenced without a
			// nil check — verify the SDK guarantees it is set whenever the
			// status is not Successful.
			return nil, fmt.Errorf("failed to update Lambda function %s, status code %v, error reason %s",
				fm.Spec.Name, output.Configuration.LastUpdateStatus, *output.Configuration.LastUpdateStatusReason)
		}
		return nil, nil
	})
	return err
}

// PublishFunction publishes a new immutable version from the function's
// current configuration and code, returning the new version identifier.
func (c *client) PublishFunction(ctx context.Context, fm FunctionManifest) (string, error) {
	input := &lambda.PublishVersionInput{
		FunctionName: aws.String(fm.Spec.Name),
	}
	cfg, err := c.client.PublishVersion(ctx, input)
	if err != nil {
		return "", fmt.Errorf("failed to publish new version for Lambda function %s: %w", fm.Spec.Name, err)
	}
	return aws.ToString(cfg.Version), nil
}

// GetTrafficConfig returns lambda provider.ErrNotFound in case remote traffic config is not existed.
// GetTrafficConfig reads the routing configuration of the default alias and
// converts it into a RoutingTrafficConfig keyed by PRIMARY/SECONDARY version.
// It returns ErrNotFound when the alias does not exist yet.
func (c *client) GetTrafficConfig(ctx context.Context, fm FunctionManifest) (routingTrafficCfg RoutingTrafficConfig, err error) {
	input := &lambda.GetAliasInput{
		FunctionName: aws.String(fm.Spec.Name),
		Name:         aws.String(defaultAliasName),
	}

	cfg, err := c.client.GetAlias(ctx, input)
	if err != nil {
		var nfe *types.ResourceNotFoundException
		if errors.As(err, &nfe) {
			err = ErrNotFound
		}
		return
	}

	routingTrafficCfg = make(map[TrafficConfigKeyName]VersionTraffic)
	/* The current return value from GetAlias as below
	{
		"AliasArn": "arn:aws:lambda:ap-northeast-1:769161735124:function:SimpleCanaryFunction:Service",
		"Name": "Service",
		"FunctionVersion": "1",
		"Description": "",
		"RoutingConfig": {
			"AdditionalVersionWeights": {
				"3": 0.9
			}
		},
		"RevisionId": "fe08805f-9851-44fc-9a79-6e086aefc290"
	}
	Note:
	- In case RoutingConfig is nil, this mean 100% of traffic is handled by version represented by FunctionVersion value (PRIMARY version).
	- In case RoutingConfig is not nil, RoutingConfig.AdditionalVersionWeights is expected to have ONLY ONE key/value pair
	which presents the SECONDARY version handling traffic (represented by the value of the pair).
	in short
	_ version: 1 - FunctionVersion (the PRIMARY) handles (1 - 0.9) percentage of current traffic.
	_ version: 3 - AdditionalVersionWeights key (the SECONDARY) handles 0.9 percentage of current traffic.
	*/
	// In case RoutingConfig is nil, 100 percent of current traffic is handled by FunctionVersion version.
	if cfg.RoutingConfig == nil {
		routingTrafficCfg[TrafficPrimaryVersionKeyName] = VersionTraffic{
			Version: aws.ToString(cfg.FunctionVersion),
			Percent: 100,
		}
		return
	}
	// In case RoutingConfig is provided, FunctionVersion value represents the primary version while
	// RoutingConfig.AdditionalVersionWeights key represents the secondary version.
	// NOTE(review): if AdditionalVersionWeights ever held more than one pair,
	// only the last iterated pair would survive (map order is random) —
	// relies on the single-pair invariant documented above.
	var secondaryVersionTraffic float64
	for version, weight := range cfg.RoutingConfig.AdditionalVersionWeights {
		secondaryVersionTraffic = percentageToPercent(weight)
		routingTrafficCfg[TrafficSecondaryVersionKeyName] = VersionTraffic{
			Version: version,
			Percent: secondaryVersionTraffic,
		}
	}
	routingTrafficCfg[TrafficPrimaryVersionKeyName] = VersionTraffic{
		Version: aws.ToString(cfg.FunctionVersion),
		Percent: 100 - secondaryVersionTraffic,
	}

	return
}

// CreateTrafficConfig creates the default alias pointing 100% of traffic at
// the given published version.
func (c *client) CreateTrafficConfig(ctx context.Context, fm FunctionManifest, version string) error {
	input := &lambda.CreateAliasInput{
		FunctionName:    aws.String(fm.Spec.Name),
		FunctionVersion: aws.String(version),
		Name:            aws.String(defaultAliasName),
	}
	_, err := c.client.CreateAlias(ctx, input)
	if err != nil {
		return fmt.Errorf("failed to create traffic config for Lambda function %s: %w", fm.Spec.Name, err)
	}
	return nil
}

// UpdateTrafficConfig updates the default alias so that the PRIMARY version
// receives the remaining traffic and the optional SECONDARY version receives
// its configured share (converted from percent to the 0..1 weight the
// AliasRoutingConfiguration expects).
func (c *client) UpdateTrafficConfig(ctx context.Context, fm FunctionManifest, routingTraffic RoutingTrafficConfig) error {
	primary, ok := routingTraffic[TrafficPrimaryVersionKeyName]
	if !ok {
		return fmt.Errorf("invalid routing traffic configuration given: primary version not found")
	}

	input := &lambda.UpdateAliasInput{
		FunctionName:    aws.String(fm.Spec.Name),
		Name:            aws.String(defaultAliasName),
		FunctionVersion: aws.String(primary.Version),
	}

	if secondary, ok := routingTraffic[TrafficSecondaryVersionKeyName]; ok {
		routingTrafficMap := make(map[string]float64)
		routingTrafficMap[secondary.Version] = precentToPercentage(secondary.Percent)
		input.RoutingConfig = &types.AliasRoutingConfiguration{
			AdditionalVersionWeights: routingTrafficMap,
		}
	}

	_, err := c.client.UpdateAlias(ctx, input)
	if err != nil {
		return fmt.Errorf("failed to update traffic config for Lambda function %s: %w", fm.Spec.Name, err)
	}
	return nil
}

// updateTagsConfig diffs the tags currently attached to the function against
// the manifest's tags and applies the minimal set of Tag/Untag calls.
// It is a no-op when the two tag sets are already equal.
func (c *client) updateTagsConfig(ctx context.Context, fm FunctionManifest) error {
	getFuncInput := &lambda.GetFunctionInput{
		FunctionName: aws.String(fm.Spec.Name),
	}
	output, err := c.client.GetFunction(ctx, getFuncInput)
	if err != nil {
		return fmt.Errorf("error occurred on list tags of Lambda function %s: %w", fm.Spec.Name, err)
	}

	functionArn := aws.ToString(output.Configuration.FunctionArn)
	currentTags := output.Tags
	// Skip if there are no changes on tags.
	if reflect.DeepEqual(currentTags, fm.Spec.Tags) {
		return nil
	}

	newDefinedTags, updatedTags, removedTags := makeFlowControlTagsMaps(currentTags, fm.Spec.Tags)

	if len(newDefinedTags) > 0 {
		if err := c.tagFunction(ctx, functionArn, newDefinedTags); err != nil {
			return fmt.Errorf("failed on add new defined tags to Lambda function %s: %w", fm.Spec.Name, err)
		}
	}

	if len(updatedTags) > 0 {
		// Changed tags are removed first and then re-added with the new values.
		if err := c.untagFunction(ctx, functionArn, updatedTags); err != nil {
			return fmt.Errorf("failed on update changed tags to Lambda function %s: %w", fm.Spec.Name, err)
		}
		if err := c.tagFunction(ctx, functionArn, updatedTags); err != nil {
			return fmt.Errorf("failed on update changed tags to Lambda function %s: %w", fm.Spec.Name, err)
		}
	}

	if len(removedTags) > 0 {
		if err := c.untagFunction(ctx, functionArn, removedTags); err != nil {
			return fmt.Errorf("failed on remove tags for Lambda function %s: %w", fm.Spec.Name, err)
		}
	}

	return nil
}

// tagFunction attaches the given tags to the function identified by its ARN.
func (c *client) tagFunction(ctx context.Context, functionArn string, tags map[string]string) error {
	tagInput := &lambda.TagResourceInput{
		Resource: aws.String(functionArn),
		Tags:     tags,
	}
	_, err := c.client.TagResource(ctx, tagInput)
	if err != nil {
		return err
	}

	return nil
}

// untagFunction removes the given tags (by key) from the function identified
// by its ARN; the tag values are ignored.
func (c *client) untagFunction(ctx context.Context, functionArn string, tags map[string]string) error {
	tagsKeys := make([]string, 0, len(tags))
	for k := range tags {
		tagsKeys = append(tagsKeys, k)
	}
	untagInput := &lambda.UntagResourceInput{
		Resource: aws.String(functionArn),
		TagKeys:  tagsKeys,
	}
	_, err := c.client.UntagResource(ctx, untagInput)
	if err != nil {
		return err
	}

	return nil
}

// makeFlowControlTagsMaps partitions definedTags (desired state) against
// remoteTags (current state) into three maps:
//   - newDefinedTags: keys present only in definedTags (to be added),
//   - updatedTags: keys present in both but with different values,
//   - removedTags: keys present only in remoteTags (to be removed).
// All three maps are always non-nil.
func makeFlowControlTagsMaps(remoteTags, definedTags map[string]string) (newDefinedTags, updatedTags, removedTags map[string]string) {
	newDefinedTags = make(map[string]string)
	updatedTags = make(map[string]string)
	removedTags = make(map[string]string)
	for k, v := range definedTags {
		val, ok := remoteTags[k]
		if !ok {
			newDefinedTags[k] = v
			continue
		}
		if val != v {
			updatedTags[k] = v
		}
	}
	for k, v := range remoteTags {
		_, ok := definedTags[k]
		if !ok {
			removedTags[k] = v
		}
	}
	return
}

// precentToPercentage converts a percent value (0..100) to the 0..1 weight
// used by Lambda alias routing.
// NOTE(review): the name contains a typo ("precent"); kept as-is to avoid
// touching callers in this package.
func precentToPercentage(in float64) float64 {
	return in / 100.0
}

// percentageToPercent converts a 0..1 alias routing weight to a percent
// value (0..100).
func percentageToPercent(in float64) float64 {
	return in * 100
}
diff --git a/pkg/app/pipedv1/platformprovider/lambda/client_test.go b/pkg/app/pipedv1/platformprovider/lambda/client_test.go
new file mode 100644
index 0000000000..8675cf2428
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/lambda/client_test.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package lambda

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestMakeFlowControlTagsMap verifies that makeFlowControlTagsMaps partitions
// the desired (defined) tags against the remote tags into the three maps that
// drive the Tag/Untag API calls: newly defined, updated and removed tags.
func TestMakeFlowControlTagsMap(t *testing.T) {
	t.Parallel()

	testcases := []struct {
		name                 string
		remoteTags           map[string]string
		definedTags          map[string]string
		wantedNewDefinedTags map[string]string
		wantedUpdatedTags    map[string]string
		wantedRemovedTags    map[string]string
	}{
		{
			name: "has only updated tags",
			remoteTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
			definedTags: map[string]string{
				"app":      "simple-app",
				"function": "code",
			},
			wantedNewDefinedTags: map[string]string{},
			wantedUpdatedTags: map[string]string{
				"app": "simple-app",
			},
			wantedRemovedTags: map[string]string{},
		},
		{
			name: "has only remove tags",
			remoteTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
			definedTags: map[string]string{
				"app": "simple",
			},
			wantedNewDefinedTags: map[string]string{},
			wantedUpdatedTags:    map[string]string{},
			wantedRemovedTags: map[string]string{
				"function": "code",
			},
		},
		{
			name: "has only newly defined tags",
			remoteTags: map[string]string{
				"app": "simple",
			},
			definedTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
			wantedNewDefinedTags: map[string]string{
				"function": "code",
			},
			wantedUpdatedTags: map[string]string{},
			wantedRemovedTags: map[string]string{},
		},
		{
			name: "complex defined tags",
			remoteTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
			definedTags: map[string]string{
				"foo": "bar",
				"app": "simple-app",
				"bar": "foo",
			},
			wantedNewDefinedTags: map[string]string{
				"foo": "bar",
				"bar": "foo",
			},
			wantedUpdatedTags: map[string]string{
				"app": "simple-app",
			},
			wantedRemovedTags: map[string]string{
				"function": "code",
			},
		},
		{
			// A nil desired set means every remote tag should be removed.
			name: "defined tags is nil",
			remoteTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
			definedTags:          nil,
			wantedNewDefinedTags: map[string]string{},
			wantedUpdatedTags:    map[string]string{},
			wantedRemovedTags: map[string]string{
				"app":      "simple",
				"function": "code",
			},
		},
		{
			name:                 "remote tags is empty and defined tags is nil",
			remoteTags:           map[string]string{},
			definedTags:          nil,
			wantedNewDefinedTags: map[string]string{},
			wantedUpdatedTags:    map[string]string{},
			wantedRemovedTags:    map[string]string{},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			newDefinedTags, updatedTags, removedTags := makeFlowControlTagsMaps(tc.remoteTags, tc.definedTags)
			assert.Equal(t, tc.wantedNewDefinedTags, newDefinedTags)
			assert.Equal(t, tc.wantedUpdatedTags, updatedTags)
			assert.Equal(t, tc.wantedRemovedTags, removedTags)
		})
	}
}
diff --git a/pkg/app/pipedv1/platformprovider/lambda/function.go b/pkg/app/pipedv1/platformprovider/lambda/function.go
new file mode 100644
index 0000000000..b1276d268d
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/lambda/function.go
@@ -0,0 +1,282 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package lambda

import (
	"fmt"
	"os"
	"strings"

	"sigs.k8s.io/yaml"

	"github.com/pipe-cd/pipecd/pkg/git"
	"github.com/pipe-cd/pipecd/pkg/model"
)

const (
	versionV1Beta1       = "pipecd.dev/v1beta1"
	functionManifestKind = "LambdaFunction"
	// Memory and Timeout lower and upper limit as noted via
	// https://docs.aws.amazon.com/sdk-for-go/api/service/lambda/#UpdateFunctionConfigurationInput
	memoryLowerLimit           = 1
	timeoutLowerLimit          = 1
	timeoutUpperLimit          = 900
	ephemeralStorageLowerLimit = 512
	ephemeralStorageUpperLimit = 10240
)

// FunctionManifest represents a LambdaFunction manifest file
// (kind/apiVersion envelope plus the function spec).
type FunctionManifest struct {
	Kind       string               `json:"kind"`
	APIVersion string               `json:"apiVersion,omitempty"`
	Spec       FunctionManifestSpec `json:"spec"`
}

// validate checks the manifest envelope (supported apiVersion and kind)
// and then delegates to the spec's own validation.
func (fm *FunctionManifest) validate() error {
	if fm.APIVersion != versionV1Beta1 {
		return fmt.Errorf("unsupported version: %s", fm.APIVersion)
	}
	if fm.Kind != functionManifestKind {
		return fmt.Errorf("invalid manifest kind given: %s", fm.Kind)
	}
	if err := fm.Spec.validate(); err != nil {
		return err
	}
	return nil
}

// FunctionManifestSpec contains configuration for LambdaFunction.
// FunctionManifestSpec describes the desired state of a Lambda function:
// its identity/role, exactly-one-of code sources (container image, S3 zip,
// or git source), runtime settings and optional architecture, storage,
// tagging, environment and VPC configuration.
type FunctionManifestSpec struct {
	Name             string            `json:"name"`
	Role             string            `json:"role"`
	ImageURI         string            `json:"image"`
	S3Bucket         string            `json:"s3Bucket"`
	S3Key            string            `json:"s3Key"`
	S3ObjectVersion  string            `json:"s3ObjectVersion"`
	SourceCode       SourceCode        `json:"source"`
	Handler          string            `json:"handler"`
	Architectures    []Architecture    `json:"architectures,omitempty"`
	EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"`
	Runtime          string            `json:"runtime"`
	Memory           int32             `json:"memory"`
	Timeout          int32             `json:"timeout"`
	Tags             map[string]string `json:"tags,omitempty"`
	Environments     map[string]string `json:"environments,omitempty"`
	VPCConfig        *VPCConfig        `json:"vpcConfig,omitempty"`
}

// VPCConfig holds the VPC attachment settings for the function.
type VPCConfig struct {
	SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
	SubnetIDs        []string `json:"subnetIds,omitempty"`
}

// validate checks required fields and value ranges of the spec.
// The git source is only required (and validated) when neither a container
// image nor an S3 zip is configured; handler/runtime are required for any
// non-image packaging.
func (fmp FunctionManifestSpec) validate() error {
	if fmp.Name == "" {
		return fmt.Errorf("lambda function is missing")
	}
	if fmp.ImageURI == "" && fmp.S3Bucket == "" {
		if err := fmp.SourceCode.validate(); err != nil {
			return err
		}
	}
	if fmp.ImageURI == "" {
		if fmp.Handler == "" {
			return fmt.Errorf("handler is missing")
		}
		if fmp.Runtime == "" {
			return fmt.Errorf("runtime is missing")
		}
	}
	for _, arch := range fmp.Architectures {
		if err := arch.validate(); err != nil {
			return fmt.Errorf("architecture is invalid: %w", err)
		}
	}
	if fmp.EphemeralStorage != nil {
		if err := fmp.EphemeralStorage.validate(); err != nil {
			return fmt.Errorf("ephemeral storage is invalid: %w", err)
		}
	}
	if fmp.Role == "" {
		return fmt.Errorf("role is missing")
	}
	// NOTE(review): this message also fires for a present-but-too-small
	// value (Memory < 1), where "missing" is slightly misleading.
	if fmp.Memory < memoryLowerLimit {
		return fmt.Errorf("memory is missing")
	}
	if fmp.Timeout < timeoutLowerLimit || fmp.Timeout > timeoutUpperLimit {
		return fmt.Errorf("timeout is missing or out of range")
	}
	return nil
}

// SourceCode points at a git repository holding the function code.
type SourceCode struct {
	Git  string `json:"git"`
	Ref  string `json:"ref"`
	Path string `json:"path"`
}

// validate requires both the git remote URL and the ref; Path is optional.
func (sc SourceCode) validate() error {
	if sc.Git == "" {
		return fmt.Errorf("remote git source is missing")
	}
	if sc.Ref == "" {
		return fmt.Errorf("source ref is missing")
	}
	return nil
}

// Architecture names a Lambda CPU architecture.
type Architecture struct {
	Name string `json:"name"`
}

// validate accepts only the two architectures Lambda supports.
func (a Architecture) validate() error {
	if a.Name != "x86_64" && a.Name != "arm64" {
		return fmt.Errorf("architecture is invalid")
	}
	return nil
}

// EphemeralStorage configures the function's /tmp size in MB.
type EphemeralStorage struct {
	Size int32 `json:"size,omitempty"`
}

// validate enforces the Lambda ephemeral storage range (512..10240 MB).
func (es EphemeralStorage) validate() error {
	if es.Size < ephemeralStorageLowerLimit || es.Size > ephemeralStorageUpperLimit {
		return fmt.Errorf("ephemeral storage is out of range")
	}
	return nil
}

// loadFunctionManifest reads the manifest file at path and parses/validates it.
func loadFunctionManifest(path string) (FunctionManifest, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return FunctionManifest{}, err
	}
	return parseFunctionManifest(data)
}

// parseFunctionManifest unmarshals a manifest (YAML or JSON — parsed via
// sigs.k8s.io/yaml) and validates it, returning the zero value on any error.
func parseFunctionManifest(data []byte) (FunctionManifest, error) {
	var obj FunctionManifest
	if err := yaml.Unmarshal(data, &obj); err != nil {
		return FunctionManifest{}, err
	}
	if err := obj.validate(); err != nil {
		return FunctionManifest{}, err
	}
	return obj, nil
}

// DecideRevisionName returns revision name to apply.
// The name is "<function>-<image tag without dots>-<commit[:7]>".
func DecideRevisionName(fm FunctionManifest, commit string) (string, error) {
	tag, err := FindImageTag(fm)
	if err != nil {
		return "", err
	}
	tag = strings.ReplaceAll(tag, ".", "")

	if len(commit) > 7 {
		commit = commit[:7]
	}
	return fmt.Sprintf("%s-%s-%s", fm.Spec.Name, tag, commit), nil
}

// FindImageTag parses image tag from given LambdaFunction manifest.
// FindImageTag extracts the tag part of Spec.ImageURI, erroring when the
// image reference has no repository name. The tag may be empty when the
// image reference carries no ":tag" suffix.
func FindImageTag(fm FunctionManifest) (string, error) {
	name, tag := parseContainerImage(fm.Spec.ImageURI)
	if name == "" {
		return "", fmt.Errorf("image name could not be empty")
	}
	return tag, nil
}

// parseContainerImage splits an image reference into its last path component
// (name) and tag.
// NOTE(review): this assumes at most one ":" in the reference — a registry
// with a port (e.g. "registry:5000/repo:tag") would yield an empty tag;
// confirm such references are out of scope.
func parseContainerImage(image string) (name, tag string) {
	parts := strings.Split(image, ":")
	if len(parts) == 2 {
		tag = parts[1]
	}
	paths := strings.Split(parts[0], "/")
	name = paths[len(paths)-1]
	return
}

// FindArtifactVersions parses artifact versions from function.yaml.
// It derives the version from whichever code source the manifest uses,
// checked in order: container image, S3 object, then git source.
func FindArtifactVersions(fm FunctionManifest) ([]*model.ArtifactVersion, error) {
	// Extract container image tag as application version.
	if fm.Spec.ImageURI != "" {
		name, tag := parseContainerImage(fm.Spec.ImageURI)
		if name == "" {
			return nil, fmt.Errorf("image name could not be empty")
		}

		return []*model.ArtifactVersion{
			{
				Kind:    model.ArtifactVersion_CONTAINER_IMAGE,
				Version: tag,
				Name:    name,
				Url:     fm.Spec.ImageURI,
			},
		}, nil
	}

	// Extract s3 object version as application version.
	if fm.Spec.S3ObjectVersion != "" {
		return []*model.ArtifactVersion{
			{
				Kind:    model.ArtifactVersion_S3_OBJECT,
				Version: fm.Spec.S3ObjectVersion,
				Name:    fm.Spec.S3Key,
				Url:     fmt.Sprintf("https://console.aws.amazon.com/s3/object/%s?prefix=%s", fm.Spec.S3Bucket, fm.Spec.S3Key),
			},
		}, nil
	}

	// Extract source code commit hash as application version.
	if fm.Spec.SourceCode.Ref != "" {
		u, err := git.ParseGitURL(fm.Spec.SourceCode.Git)
		if err != nil {
			return nil, err
		}

		// SSH remotes are rendered as https commit links.
		scheme := "https"
		if u.Scheme != "ssh" {
			scheme = u.Scheme
		}

		repoPath := strings.Trim(u.Path, "/")
		repoPath = strings.TrimSuffix(repoPath, ".git")

		var gitURL string
		switch u.Host {
		case "github.com", "gitlab.com":
			gitURL = fmt.Sprintf("%s://%s/%s/commit/%s", scheme, u.Host, repoPath, fm.Spec.SourceCode.Ref)
		case "bitbucket.org":
			gitURL = fmt.Sprintf("%s://%s/%s/commits/%s", scheme, u.Host, repoPath, fm.Spec.SourceCode.Ref)
		default:
			// TODO: Show repo name with commit link for other git provider
			gitURL = ""
			repoPath = ""
		}

		return []*model.ArtifactVersion{
			{
				Kind:    model.ArtifactVersion_GIT_SOURCE,
				Version: fm.Spec.SourceCode.Ref,
				Name:    repoPath,
				Url:     gitURL,
			},
		}, nil
	}

	return nil, fmt.Errorf("couldn't determine artifact versions")
}
diff --git a/pkg/app/pipedv1/platformprovider/lambda/function_test.go b/pkg/app/pipedv1/platformprovider/lambda/function_test.go
new file mode 100644
index 0000000000..a98ec94362
--- /dev/null
+++ b/pkg/app/pipedv1/platformprovider/lambda/function_test.go
@@ -0,0 +1,500 @@
+// Copyright 2024 The PipeCD Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package lambda

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/pipe-cd/pipecd/pkg/model"
)

// TestParseFunctionManifest exercises parseFunctionManifest with valid and
// invalid manifests. The data is parsed via sigs.k8s.io/yaml, so YAML-isms
// such as unquoted keys and trailing commas are accepted in these fixtures.
func TestParseFunctionManifest(t *testing.T) {
	t.Parallel()

	testcases := []struct {
		name     string
		data     string
		wantSpec interface{}
		wantErr  bool
	}{
		{
			name: "correct config for LambdaFunction",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1"
  }
}`,
			wantSpec: FunctionManifest{
				Kind:       "LambdaFunction",
				APIVersion: "pipecd.dev/v1beta1",
				Spec: FunctionManifestSpec{
					Name:     "SimpleFunction",
					Role:     "arn:aws:iam::xxxxx:role/lambda-role",
					Memory:   128,
					Timeout:  5,
					ImageURI: "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
				},
			},
			wantErr: false,
		},
		{
			name: "correct config for LambdaFunction with specifying architecture",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
	  "architectures": [
		{
			"name": "x86_64",
		},
		{
			"name": "arm64",
		}
	  ]
  }
}`,
			wantSpec: FunctionManifest{
				Kind:       "LambdaFunction",
				APIVersion: "pipecd.dev/v1beta1",
				Spec: FunctionManifestSpec{
					Name:     "SimpleFunction",
					Role:     "arn:aws:iam::xxxxx:role/lambda-role",
					Memory:   128,
					Timeout:  5,
					ImageURI: "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
					Architectures: []Architecture{
						{
							Name: "x86_64",
						},
						{
							Name: "arm64",
						},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "correct config for LambdaFunction with specifying ephemeral storage",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
	  "ephemeralStorage": {
		  size: 512
	  }
  }
}`,
			wantSpec: FunctionManifest{
				Kind:       "LambdaFunction",
				APIVersion: "pipecd.dev/v1beta1",
				Spec: FunctionManifestSpec{
					Name:             "SimpleFunction",
					Role:             "arn:aws:iam::xxxxx:role/lambda-role",
					Memory:           128,
					Timeout:          5,
					ImageURI:         "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
					EphemeralStorage: &EphemeralStorage{Size: 512},
				},
			},
			wantErr: false,
		},
		{
			name: "correct config for LambdaFunction with specifying vpc config",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
	  "vpcConfig": {
		  securityGroupIds: ["sg-1234567890", "sg-0987654321"],
		  subnetIds: ["subnet-1234567890", "subnet-0987654321"]
	  }
  }
}`,
			wantSpec: FunctionManifest{
				Kind:       "LambdaFunction",
				APIVersion: "pipecd.dev/v1beta1",
				Spec: FunctionManifestSpec{
					Name:     "SimpleFunction",
					Role:     "arn:aws:iam::xxxxx:role/lambda-role",
					Memory:   128,
					Timeout:  5,
					ImageURI: "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
					VPCConfig: &VPCConfig{
						SecurityGroupIDs: []string{"sg-1234567890", "sg-0987654321"},
						SubnetIDs:        []string{"subnet-1234567890", "subnet-0987654321"},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "missing required fields",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {}
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
		{
			name: "missing memory value",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1"
  }
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
		{
			name: "invalid timeout value",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 1000,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1"
  }
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
		{
			name: "invalid ephemeral storage value",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
	  "ephemeralStorage": {
		  size: 511
	  }
  }
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
		{
			name: "no function code defined",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5
  }
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
		{
			// Both image and git source provided: validation does not reject
			// multiple code sources.
			name: "no error in case of multiple function code defined",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 5,
	  "image": "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
	  "source": {
		  "git": "git@remote-url",
		  "ref": "master",
		  "path": "./"
	  }
  }
}`,
			wantSpec: FunctionManifest{
				Kind:       "LambdaFunction",
				APIVersion: "pipecd.dev/v1beta1",
				Spec: FunctionManifestSpec{
					Name:     "SimpleFunction",
					Role:     "arn:aws:iam::xxxxx:role/lambda-role",
					Memory:   128,
					Timeout:  5,
					ImageURI: "ecr.region.amazonaws.com/lambda-simple-function:v0.0.1",
					SourceCode: SourceCode{
						Git:  "git@remote-url",
						Ref:  "master",
						Path: "./",
					},
				},
			},
			wantErr: false,
		},
		{
			// S3 zip packaging without handler/runtime must fail validation.
			name: "missing required values in case of using other than container image as function",
			data: `{
  "apiVersion": "pipecd.dev/v1beta1",
  "kind": "LambdaFunction",
  "spec": {
	  "name": "SimpleFunction",
	  "role": "arn:aws:iam::xxxxx:role/lambda-role",
	  "memory": 128,
	  "timeout": 10,
	  "s3Bucket": "pipecd-sample",
	  "s3Key": "function-code",
	  "s3ObjectVersion": "xyz"
  }
}`,
			wantSpec: FunctionManifest{},
			wantErr:  true,
		},
	}
	for _, tc := range testcases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			fm, err := parseFunctionManifest([]byte(tc.data))
			assert.Equal(t, tc.wantErr, err != nil)
			assert.Equal(t, tc.wantSpec, fm)
		})
	}
}

// TestFindArtifactVersions checks version extraction from each of the three
// code sources: container image, S3 object, and git source (per host).
func TestFindArtifactVersions(t *testing.T) {
	t.Parallel()

	testcases := []struct {
		name        string
		input       []byte
		expected    []*model.ArtifactVersion
		expectedErr bool
	}{
		{
			name: "[From container image] ok: using container image",
			input: []byte(`
apiVersion: pipecd.dev/v1beta1
kind: LambdaFunction
spec:
  name: SimpleFunction
  image: ecr.ap-northeast-1.amazonaws.com/lambda-test:v0.0.1
  role: arn:aws:iam::76xxxxxxx:role/lambda-role
  memory: 512
  timeout: 30
`),
			expected: []*model.ArtifactVersion{
				{
					Kind:    model.ArtifactVersion_CONTAINER_IMAGE,
					Version: "v0.0.1",
					Name:    "lambda-test",
					Url:     "ecr.ap-northeast-1.amazonaws.com/lambda-test:v0.0.1",
				},
			},
			expectedErr: false,
		},
		{
			name: "[From container image] error: no image name",
			input: []byte(`
apiVersion: pipecd.dev/v1beta1
kind: LambdaFunction
spec:
  name: SimpleFunction
  image: ecr.ap-northeast-1.amazonaws.com/:v0.0.1
  role: arn:aws:iam::76xxxxxxx:role/lambda-role
  memory: 512
  timeout: 30
`),
			expected:    nil,
			expectedErr: true,
		},
		{
			name: "[From S3] ok: using s3 object",
			input: []byte(`
apiVersion: pipecd.dev/v1beta1
kind: LambdaFunction
spec:
  name: SimpleZipPackingS3Function
  role: arn:aws:iam::76xxxxxxx:role/lambda-role
  s3Bucket: pipecd-sample-lambda
  s3Key: pipecd-sample-src
  s3ObjectVersion: 1pTK9_v0Kd7I8Sk4n6abzCL
  handler: app.lambdaHandler
  runtime: nodejs14.x
  memory: 512
  timeout: 30
`),
			expected: []*model.ArtifactVersion{
				{
					Kind:    model.ArtifactVersion_S3_OBJECT,
					Version: "1pTK9_v0Kd7I8Sk4n6abzCL",
					Name:    "pipecd-sample-src",
					Url:     "https://console.aws.amazon.com/s3/object/pipecd-sample-lambda?prefix=pipecd-sample-src",
				},
			},
			expectedErr: false,
		},
		{
			name: "[From Source Code] ok: using source code",
			input: []byte(`
apiVersion: pipecd.dev/v1beta1
kind: LambdaFunction
spec:
  name: SimpleSourceCodeFunction
  role: arn:aws:iam::76xxxxxxx:role/lambda-role
  source:
    git: git@github.com:username/lambda-function-code.git
    ref: dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603
    path: hello-world
  handler: app.lambdaHandler
  runtime: nodejs14.x
  memory: 512
  timeout: 30
`),
			expected: []*model.ArtifactVersion{
				{
					Kind:    model.ArtifactVersion_GIT_SOURCE,
					Version: "dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603",
					Name:    "username/lambda-function-code",
					Url:     "https://github.com/username/lambda-function-code/commit/dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603",
				},
			},
			expectedErr: false,
		},
		{
			name: "[From Source Code] ok: using source code from gitlab",
			input: []byte(`
apiVersion: pipecd.dev/v1beta1
kind: LambdaFunction
spec:
  name: SimpleSourceCodeFunction
  role: arn:aws:iam::76xxxxxxx:role/lambda-role
  source:
    git: git@gitlab.com:username/lambda-function-code.git
    ref: dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603
    path: hello-world
  handler: app.lambdaHandler
  runtime: nodejs14.x
  memory: 512
  timeout: 30
`),
			expected: []*model.ArtifactVersion{
				{
					Kind:    model.ArtifactVersion_GIT_SOURCE,
					Version: "dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603",
					Name:    "username/lambda-function-code",
					Url:     "https://gitlab.com/username/lambda-function-code/commit/dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603",
				},
			},
			expectedErr: false,
		},
+ { + name: "[From Source Code] ok: using source code from bitbucket", + input: []byte(` +apiVersion: pipecd.dev/v1beta1 +kind: LambdaFunction +spec: + name: SimpleSourceCodeFunction + role: arn:aws:iam::76xxxxxxx:role/lambda-role + source: + git: git@bitbucket.org:username/lambda-function-code.git + ref: dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603 + path: hello-world + handler: app.lambdaHandler + runtime: nodejs14.x + memory: 512 + timeout: 30 +`), + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_GIT_SOURCE, + Version: "dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603", + Name: "username/lambda-function-code", + Url: "https://bitbucket.org/username/lambda-function-code/commits/dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603", + }, + }, + expectedErr: false, + }, + { + name: "[From Source Code] ok: using source code from other git provider", + input: []byte(` +apiVersion: pipecd.dev/v1beta1 +kind: LambdaFunction +spec: + name: SimpleSourceCodeFunction + role: arn:aws:iam::76xxxxxxx:role/lambda-role + source: + git: git@ghe.github.com:username/lambda-function-code.git + ref: dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603 + path: hello-world + handler: app.lambdaHandler + runtime: nodejs14.x + memory: 512 + timeout: 30 +`), + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_GIT_SOURCE, + Version: "dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603", + Name: "", + Url: "", + }, + }, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + fm, _ := parseFunctionManifest(tc.input) + versions, err := FindArtifactVersions(fm) + + assert.Equal(t, tc.expectedErr, err != nil) + assert.ElementsMatch(t, tc.expected, versions) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/lambda/lambda.go b/pkg/app/pipedv1/platformprovider/lambda/lambda.go new file mode 100644 index 0000000000..73d8b3a6d2 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/lambda/lambda.go @@ -0,0 +1,90 @@ +// 
Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lambda + +import ( + "context" + "io" + "path/filepath" + "sync" + + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + + "github.com/pipe-cd/pipecd/pkg/config" +) + +// Client is wrapper of AWS client. +type Client interface { + IsFunctionExist(ctx context.Context, name string) (bool, error) + CreateFunction(ctx context.Context, fm FunctionManifest) error + CreateFunctionFromSource(ctx context.Context, fm FunctionManifest, zip io.Reader) error + UpdateFunction(ctx context.Context, fm FunctionManifest) error + UpdateFunctionFromSource(ctx context.Context, fm FunctionManifest, zip io.Reader) error + PublishFunction(ctx context.Context, fm FunctionManifest) (version string, err error) + GetTrafficConfig(ctx context.Context, fm FunctionManifest) (routingTrafficCfg RoutingTrafficConfig, err error) + CreateTrafficConfig(ctx context.Context, fm FunctionManifest, version string) error + UpdateTrafficConfig(ctx context.Context, fm FunctionManifest, routingTraffic RoutingTrafficConfig) error +} + +// Registry holds a pool of aws client wrappers. +type Registry interface { + Client(name string, cfg *config.PlatformProviderLambdaConfig, logger *zap.Logger) (Client, error) +} + +// LoadFunctionManifest returns FunctionManifest object from a given Function config manifest file. 
+func LoadFunctionManifest(appDir, functionManifestFilename string) (FunctionManifest, error) { + path := filepath.Join(appDir, functionManifestFilename) + return loadFunctionManifest(path) +} + +type registry struct { + clients map[string]Client + mu sync.RWMutex + newGroup *singleflight.Group +} + +func (r *registry) Client(name string, cfg *config.PlatformProviderLambdaConfig, logger *zap.Logger) (Client, error) { + r.mu.RLock() + client, ok := r.clients[name] + r.mu.RUnlock() + if ok { + return client, nil + } + + c, err, _ := r.newGroup.Do(name, func() (interface{}, error) { + return newClient(cfg.Region, cfg.Profile, cfg.CredentialsFile, cfg.RoleARN, cfg.TokenFile, logger) + }) + if err != nil { + return nil, err + } + + client = c.(Client) + r.mu.Lock() + r.clients[name] = client + r.mu.Unlock() + + return client, nil +} + +var defaultRegistry = ®istry{ + clients: make(map[string]Client), + newGroup: &singleflight.Group{}, +} + +// DefaultRegistry returns a pool of aws clients and a mutex associated with it. +func DefaultRegistry() Registry { + return defaultRegistry +} diff --git a/pkg/app/pipedv1/platformprovider/lambda/routing_traffic.go b/pkg/app/pipedv1/platformprovider/lambda/routing_traffic.go new file mode 100644 index 0000000000..b11a7e3d64 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/lambda/routing_traffic.go @@ -0,0 +1,53 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lambda + +import ( + "encoding/json" +) + +// TrafficConfigKeyName represents key for lambda service config map. +type TrafficConfigKeyName string + +const ( + // TrafficPrimaryVersionKeyName represents the key points to primary version config on traffic routing map. + TrafficPrimaryVersionKeyName TrafficConfigKeyName = "primary" + // TrafficSecondaryVersionKeyName represents the key points to primary version config on traffic routing map. + TrafficSecondaryVersionKeyName TrafficConfigKeyName = "secondary" +) + +// RoutingTrafficConfig presents a map of primary and secondary version traffic for lambda function alias. +type RoutingTrafficConfig map[TrafficConfigKeyName]VersionTraffic + +func (c *RoutingTrafficConfig) Encode() (string, error) { + out, err := json.Marshal(c) + if err != nil { + return "", err + } + return string(out), nil +} + +func (c *RoutingTrafficConfig) Decode(data []byte) error { + if err := json.Unmarshal(data, c); err != nil { + return err + } + return nil +} + +// VersionTraffic presents the version, and the percent of traffic that's routed to it. +type VersionTraffic struct { + Version string `json:"version"` + Percent float64 `json:"percent"` +} diff --git a/pkg/app/pipedv1/platformprovider/platformprovider.go b/pkg/app/pipedv1/platformprovider/platformprovider.go new file mode 100644 index 0000000000..807cce4ca9 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/platformprovider.go @@ -0,0 +1,24 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package platformprovider + +import ( + "errors" +) + +var ( + ErrNotFound = errors.New("not found") + ErrUnimplemented = errors.New("unimplemented") +) diff --git a/pkg/app/pipedv1/platformprovider/terraform/module.go b/pkg/app/pipedv1/platformprovider/terraform/module.go new file mode 100644 index 0000000000..39929d076a --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/module.go @@ -0,0 +1,128 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terraform + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +// FileMapping is a schema for Terraform file. +type FileMapping struct { + ModuleMappings []*ModuleMapping `hcl:"module,block"` + Remain hcl.Body `hcl:",remain"` +} + +// ModuleMapping is a schema for "module" block in Terraform file. +type ModuleMapping struct { + Name string `hcl:"name,label"` + Source string `hcl:"source"` + Version string `hcl:"version,optional"` + Remain hcl.Body `hcl:",remain"` +} + +// File represents a Terraform file. +type File struct { + Modules []*Module +} + +// Module represents a "module" block in Terraform file. 
+type Module struct { + Name string + Source string + Version string +} + +const tfFileExtension = ".tf" + +// LoadTerraformFiles loads terraform files from a given dir. +func LoadTerraformFiles(dir string) ([]File, error) { + fileInfos, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + filepaths := make([]string, 0) + for _, f := range fileInfos { + if f.IsDir() { + continue + } + + if ext := filepath.Ext(f.Name()); ext != tfFileExtension { + continue + } + + filepaths = append(filepaths, filepath.Join(dir, f.Name())) + } + + if len(filepaths) == 0 { + return nil, fmt.Errorf("couldn't find terraform module") + } + + p := hclparse.NewParser() + tfs := make([]File, 0, len(filepaths)) + for _, fp := range filepaths { + f, diags := p.ParseHCLFile(fp) + if diags.HasErrors() { + return nil, diags + } + + fm := &FileMapping{} + diags = gohcl.DecodeBody(f.Body, nil, fm) + if diags.HasErrors() { + return nil, diags + } + + tf := File{ + Modules: make([]*Module, 0, len(fm.ModuleMappings)), + } + for _, m := range fm.ModuleMappings { + tf.Modules = append(tf.Modules, &Module{ + Name: m.Name, + Source: m.Source, + Version: m.Version, + }) + } + + tfs = append(tfs, tf) + } + + return tfs, nil +} + +// FindArtifactVersions parses artifact versions from Terraform files. +// For Terraform, module version is an artifact version. 
+func FindArtifactVersions(tfs []File) ([]*model.ArtifactVersion, error) { + versions := make([]*model.ArtifactVersion, 0) + for _, tf := range tfs { + for _, m := range tf.Modules { + versions = append(versions, &model.ArtifactVersion{ + Kind: model.ArtifactVersion_TERRAFORM_MODULE, + Version: m.Version, + Name: m.Name, + Url: m.Source, + }) + } + } + + return versions, nil +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/module_test.go b/pkg/app/pipedv1/platformprovider/terraform/module_test.go new file mode 100644 index 0000000000..fd6172d1ab --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/module_test.go @@ -0,0 +1,200 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package terraform + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pipe-cd/pipecd/pkg/model" +) + +func TestLoadTerraformFiles(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + moduleDir string + expected []File + expectedErr bool + }{ + { + name: "single module", + moduleDir: "./testdata/single_module", + expected: []File{ + { + Modules: []*Module{ + { + Name: "helloworld", + Source: "helloworld", + Version: "v1.0.0", + }, + }, + }, + }, + expectedErr: false, + }, + { + name: "single module with optional argument", + moduleDir: "./testdata/single_module_optional", + expected: []File{ + { + Modules: []*Module{ + { + Name: "helloworld", + Source: "helloworld", + Version: "", + }, + }, + }, + }, + expectedErr: false, + }, + { + name: "multi modules", + moduleDir: "./testdata/multi_modules", + expected: []File{ + { + Modules: []*Module{ + { + Name: "helloworld_01", + Source: "helloworld", + Version: "v1.0.0", + }, + { + Name: "helloworld_02", + Source: "helloworld", + Version: "v0.9.0", + }, + }, + }, + }, + expectedErr: false, + }, + { + name: "multi modules with multi files", + moduleDir: "./testdata/multi_modules_with_multi_files", + expected: []File{ + { + Modules: []*Module{ + { + Name: "helloworld_01", + Source: "helloworld", + Version: "v1.0.0", + }, + }, + }, + { + Modules: []*Module{ + { + Name: "helloworld_02", + Source: "helloworld", + Version: "v0.9.0", + }, + }, + }, + }, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + tfs, err := LoadTerraformFiles(tc.moduleDir) + if err != nil { + t.Fatal(err) + } + + assert.ElementsMatch(t, tc.expected, tfs) + assert.Equal(t, tc.expectedErr, err != nil) + }) + } +} + +func TestFindArticatVersions(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + moduleDir string + expected []*model.ArtifactVersion + expectedErr 
bool + }{ + { + name: "single module", + moduleDir: "./testdata/single_module", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_TERRAFORM_MODULE, + Name: "helloworld", + Url: "helloworld", + Version: "v1.0.0", + }, + }, + expectedErr: false, + }, + { + name: "single module with optional field", + moduleDir: "./testdata/single_module_optional", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_TERRAFORM_MODULE, + Name: "helloworld", + Url: "helloworld", + Version: "", + }, + }, + expectedErr: false, + }, + { + name: "multi modules", + moduleDir: "./testdata/multi_modules", + expected: []*model.ArtifactVersion{ + { + Kind: model.ArtifactVersion_TERRAFORM_MODULE, + Name: "helloworld_01", + Url: "helloworld", + Version: "v1.0.0", + }, + { + Kind: model.ArtifactVersion_TERRAFORM_MODULE, + Name: "helloworld_02", + Url: "helloworld", + Version: "v0.9.0", + }, + }, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + tfs, err := LoadTerraformFiles(tc.moduleDir) + require.NoError(t, err) + + versions, err := FindArtifactVersions(tfs) + assert.ElementsMatch(t, tc.expected, versions) + assert.Equal(t, tc.expectedErr, err != nil) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/terraform.go b/pkg/app/pipedv1/platformprovider/terraform/terraform.go new file mode 100644 index 0000000000..f28121df2e --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/terraform.go @@ -0,0 +1,408 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terraform + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strconv" + "strings" +) + +type options struct { + noColor bool + vars []string + varFiles []string + + sharedFlags []string + initFlags []string + planFlags []string + applyFlags []string + + sharedEnvs []string + initEnvs []string + planEnvs []string + applyEnvs []string +} + +type Option func(*options) + +func WithoutColor() Option { + return func(opts *options) { + opts.noColor = true + } +} + +func WithVars(vars []string) Option { + return func(opts *options) { + opts.vars = vars + } +} + +func WithVarFiles(files []string) Option { + return func(opts *options) { + opts.varFiles = files + } +} + +func WithAdditionalFlags(shared, init, plan, apply []string) Option { + return func(opts *options) { + opts.sharedFlags = append(opts.sharedFlags, shared...) + opts.initFlags = append(opts.initFlags, init...) + opts.planFlags = append(opts.planFlags, plan...) + opts.applyFlags = append(opts.applyFlags, apply...) + } +} + +func WithAdditionalEnvs(shared, init, plan, apply []string) Option { + return func(opts *options) { + opts.sharedEnvs = append(opts.sharedEnvs, shared...) + opts.initEnvs = append(opts.initEnvs, init...) + opts.planEnvs = append(opts.planEnvs, plan...) + opts.applyEnvs = append(opts.applyEnvs, apply...) 
+ } +} + +type Terraform struct { + execPath string + dir string + + options options +} + +func NewTerraform(execPath, dir string, opts ...Option) *Terraform { + opt := options{} + for _, o := range opts { + o(&opt) + } + + return &Terraform{ + execPath: execPath, + dir: dir, + options: opt, + } +} + +func (t *Terraform) Version(ctx context.Context) (string, error) { + args := []string{"version"} + cmd := exec.CommandContext(ctx, t.execPath, args...) + cmd.Dir = t.dir + cmd.Env = append(os.Environ(), t.options.sharedEnvs...) + + out, err := cmd.CombinedOutput() + if err != nil { + return string(out), err + } + + return strings.TrimSpace(string(out)), nil +} + +func (t *Terraform) Init(ctx context.Context, w io.Writer) error { + args := []string{ + "init", + } + args = append(args, t.makeCommonCommandArgs()...) + args = append(args, t.options.initFlags...) + + cmd := exec.CommandContext(ctx, t.execPath, args...) + cmd.Dir = t.dir + cmd.Stdout = w + cmd.Stderr = w + + env := append(os.Environ(), t.options.sharedEnvs...) + env = append(env, t.options.initEnvs...) + cmd.Env = env + + io.WriteString(w, fmt.Sprintf("terraform %s", strings.Join(args, " "))) + return cmd.Run() +} + +func (t *Terraform) SelectWorkspace(ctx context.Context, workspace string) error { + args := []string{ + "workspace", + "select", + workspace, + } + cmd := exec.CommandContext(ctx, t.execPath, args...) + cmd.Dir = t.dir + cmd.Env = append(os.Environ(), t.options.sharedEnvs...) 
+ + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to select workspace: %s (%w)", string(out), err) + } + + return nil +} + +type PlanResult struct { + Adds int + Changes int + Destroys int + Imports int + + PlanOutput string +} + +func (r PlanResult) NoChanges() bool { + return r.Adds == 0 && r.Changes == 0 && r.Destroys == 0 && r.Imports == 0 +} + +func (r PlanResult) Render() (string, error) { + terraformDiffStart := "Terraform will perform the following actions:" + startIndex := strings.Index(r.PlanOutput, terraformDiffStart) + len(terraformDiffStart) + + terraformDiffEnd := fmt.Sprintf("Plan: %d to import, %d to add, %d to change, %d to destroy.", r.Imports, r.Adds, r.Changes, r.Destroys) + endIndex := strings.Index(r.PlanOutput, terraformDiffEnd) + len(terraformDiffEnd) + if endIndex < startIndex { + terraformDiffEnd = fmt.Sprintf("Plan: %d to add, %d to change, %d to destroy.", r.Adds, r.Changes, r.Destroys) + endIndex = strings.Index(r.PlanOutput, terraformDiffEnd) + len(terraformDiffEnd) + } + + if endIndex < startIndex { + return "", fmt.Errorf("unable to parse Terraform plan result") + } + + out := r.PlanOutput[startIndex:endIndex] + + rendered := "" + var curlyBracketStack []rune + var squareBracketStack []rune + + scanner := bufio.NewScanner(strings.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 { + continue + } + + r := []rune(line) + tail := r[len(r)-1] + + // The outermost nest does not have a sign. + if tail == '{' && len(curlyBracketStack) == 0 { + // Terraform's outermost block would be resource block. + deadline := strings.Index(string(r), "resource") + for i := 0; i < deadline; i++ { + r[i] = ' ' + } + } + + // Get head rune without tab and space. + head, pos := headRuneWithoutWhiteSpace(r) + if pos < 0 { + continue + } + + // Move sign to the beginning. 
+ if head == '+' || head == '-' || head == '~' { + r[0], r[pos] = r[pos], r[0] + } + + // Corresponding pairs with corresponding sign. + if tail == '{' { + curlyBracketStack = append(curlyBracketStack, r[0]) + } + if head == '}' { + r[0] = signMatchBracket(&curlyBracketStack, r[0]) + } + if tail == '[' { + squareBracketStack = append(squareBracketStack, r[0]) + } + if head == ']' { + r[0] = signMatchBracket(&squareBracketStack, r[0]) + } + + rendered += string(r) + rendered += "\n" + } + + return rendered, nil +} + +// Return rune at the top of the stack, or r in case of error. +func signMatchBracket(l *[]rune, r rune) rune { + list := *l + if len(list) == 0 { + return r + } + n := len(list) - 1 + v := list[n] + *l = list[:n] + return v +} + +func headRuneWithoutWhiteSpace(r []rune) (rune, int) { + for i, ri := range r { + if !(ri == '\t' || ri == ' ') { + return ri, i + } + } + return ' ', -1 +} + +func GetExitCode(err error) int { + if err == nil { + return 0 + } + if exitErr, ok := err.(*exec.ExitError); ok { + return exitErr.ExitCode() + } + return 1 +} + +func (t *Terraform) Plan(ctx context.Context, w io.Writer) (PlanResult, error) { + args := []string{ + "plan", + "-lock=false", + "-detailed-exitcode", + } + args = append(args, t.makeCommonCommandArgs()...) + args = append(args, t.options.planFlags...) + + var buf bytes.Buffer + stdout := io.MultiWriter(w, &buf) + + cmd := exec.CommandContext(ctx, t.execPath, args...) + cmd.Dir = t.dir + cmd.Stdout = stdout + cmd.Stderr = stdout + + env := append(os.Environ(), t.options.sharedEnvs...) + env = append(env, t.options.planEnvs...) 
+ cmd.Env = env + + io.WriteString(w, fmt.Sprintf("terraform %s", strings.Join(args, " "))) + err := cmd.Run() + switch GetExitCode(err) { + case 0: + return PlanResult{}, nil + case 2: + return parsePlanResult(buf.String(), !t.options.noColor) + default: + return PlanResult{}, err + } +} + +func (t *Terraform) makeCommonCommandArgs() (args []string) { + if t.options.noColor { + args = append(args, "-no-color") + } + for _, v := range t.options.vars { + args = append(args, fmt.Sprintf("-var=%s", v)) + } + for _, f := range t.options.varFiles { + args = append(args, fmt.Sprintf("-var-file=%s", f)) + } + args = append(args, t.options.sharedFlags...) + return +} + +var ( + // Import block was introduced from Terraform v1.5.0. + // Keep this regex for backward compatibility. + planHasChangeRegex = regexp.MustCompile(`(?m)^Plan:(?: (\d+) to import,)?? (\d+) to add, (\d+) to change, (\d+) to destroy.$`) + planNoChangesRegex = regexp.MustCompile(`(?m)^No changes. Infrastructure is up-to-date.$`) +) + +// Borrowed from https://github.com/acarl005/stripansi +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var ansiRegex = regexp.MustCompile(ansi) + +func stripAnsiCodes(str string) string { + return ansiRegex.ReplaceAllString(str, "") +} + +func parsePlanResult(out string, ansiIncluded bool) (PlanResult, error) { + parseNums := func(vals ...string) (imports, adds, changes, destroys int, err error) { + impt := vals[0] + add := vals[1] + change := vals[2] + destroy := vals[3] + + if impt != "" { + imports, err = strconv.Atoi(impt) + if err != nil { + return + } + } + + adds, err = strconv.Atoi(add) + if err != nil { + return + } + changes, err = strconv.Atoi(change) + if err != nil { + return + } + destroys, err = strconv.Atoi(destroy) + if err != nil { + return + } + return + } + + if ansiIncluded { + out = stripAnsiCodes(out) + } + + if s := 
planHasChangeRegex.FindStringSubmatch(out); len(s) == 5 { + imports, adds, changes, destroys, err := parseNums(s[1:]...) + if err == nil { + return PlanResult{ + Adds: adds, + Changes: changes, + Destroys: destroys, + Imports: imports, + PlanOutput: out, + }, nil + } + } + + if s := planNoChangesRegex.FindStringSubmatch(out); len(s) > 0 { + return PlanResult{}, nil + } + + return PlanResult{}, fmt.Errorf("unable to parse plan output") +} + +func (t *Terraform) Apply(ctx context.Context, w io.Writer) error { + args := []string{ + "apply", + "-auto-approve", + "-input=false", + } + args = append(args, t.makeCommonCommandArgs()...) + args = append(args, t.options.applyFlags...) + + cmd := exec.CommandContext(ctx, t.execPath, args...) + cmd.Dir = t.dir + cmd.Stdout = w + cmd.Stderr = w + + env := append(os.Environ(), t.options.sharedEnvs...) + env = append(env, t.options.applyEnvs...) + cmd.Env = env + + io.WriteString(w, fmt.Sprintf("terraform %s", strings.Join(args, " "))) + return cmd.Run() +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/terraform_test.go b/pkg/app/pipedv1/platformprovider/terraform/terraform_test.go new file mode 100644 index 0000000000..7580221cf0 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/terraform_test.go @@ -0,0 +1,153 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package terraform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPlanHasChangeRegex(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input string + expected []string + }{ + { + name: "older than v1.5.0", + input: "Plan: 1 to add, 2 to change, 3 to destroy.", + expected: []string{"Plan: 1 to add, 2 to change, 3 to destroy.", "", "1", "2", "3"}, + }, + { + name: "later than v1.5.0", + input: "Plan: 0 to import, 1 to add, 2 to change, 3 to destroy.", + expected: []string{"Plan: 0 to import, 1 to add, 2 to change, 3 to destroy.", "0", "1", "2", "3"}, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + assert.Equal(t, tc.expected, planHasChangeRegex.FindStringSubmatch(tc.input)) + }) + } +} + +func TestParsePlanResult(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + input string + expected PlanResult + expectedErr bool + }{ + { + name: "older than v1.5.0", + input: `Plan: 1 to add, 2 to change, 3 to destroy.`, + expected: PlanResult{Adds: 1, Changes: 2, Destroys: 3, PlanOutput: "Plan: 1 to add, 2 to change, 3 to destroy."}, + expectedErr: false, + }, + { + name: "later than v1.5.0", + input: `Plan: 1 to import, 1 to add, 2 to change, 3 to destroy.`, + expected: PlanResult{Imports: 1, Adds: 1, Changes: 2, Destroys: 3, PlanOutput: "Plan: 1 to import, 1 to add, 2 to change, 3 to destroy."}, + expectedErr: false, + }, + { + name: "Invalid number of changes", + input: `Plan: a to add, 2 to change, 3 to destroy.`, + expectedErr: true, + }, + { + name: "Invalid plan result output", + input: `Plan: 1 to add, 2 to change.`, + expectedErr: true, + }, + { + name: "No changes", + input: `No changes. 
Infrastructure is up-to-date.`, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + result, err := parsePlanResult(tc.input, false) + assert.Equal(t, tc.expectedErr, err != nil) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestRender(t *testing.T) { + t.Parallel() + + testcases := []struct { + name string + expected string + planResult *PlanResult + expectedErr bool + }{ + { + name: "success", + planResult: &PlanResult{ + Imports: 1, + Adds: 2, + Changes: 3, + Destroys: 4, + PlanOutput: ` +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + resource "test-add" "test" { + + id = (known after apply) + } + - resource "test-del" "test" { + + id = "foo" + } + +Plan: 1 to import, 2 to add, 3 to change, 4 to destroy. +`, + }, + expected: ` resource "test-add" "test" { ++ id = (known after apply) + } + resource "test-del" "test" { ++ id = "foo" + } +Plan: 1 to import, 2 to add, 3 to change, 4 to destroy. 
+`, + expectedErr: false, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + actual, err := tc.planResult.Render() + assert.Equal(t, tc.expected, actual) + assert.Equal(t, tc.expectedErr, err != nil) + }) + } +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/helloworld/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/helloworld/main.tf new file mode 100644 index 0000000000..6dadf55b5b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/helloworld/main.tf @@ -0,0 +1,19 @@ +resource "docker_container" "helloworld" { + name = "gcr.io/pipecd/helloworld:${var.image_version}" + ports { + internal = 9376 + external = "${var.external_port}" + } +} + +variable "external_port" { + default = 80 +} + +variable "image_version" { + default = "latest" +} + +output "container" { + value = docker_image.helloworld +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/main.tf new file mode 100644 index 0000000000..1be67f9be9 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules/main.tf @@ -0,0 +1,16 @@ +provider "docker" { +} + +module "helloworld_01" { + source = "helloworld" + version = "v1.0.0" + image_version = "v1.0.0" + external_port = 8080 +} + +module "helloworld_02" { + source = "helloworld" + version = "v0.9.0" + image_version = "v0.9.0" + external_port = 8081 +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld/main.tf new file mode 100644 index 0000000000..6dadf55b5b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld/main.tf @@ -0,0 +1,19 @@ +resource "docker_container" 
"helloworld" { + name = "gcr.io/pipecd/helloworld:${var.image_version}" + ports { + internal = 9376 + external = "${var.external_port}" + } +} + +variable "external_port" { + default = 80 +} + +variable "image_version" { + default = "latest" +} + +output "container" { + value = docker_image.helloworld +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_01.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_01.tf new file mode 100644 index 0000000000..cc0d21caa4 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_01.tf @@ -0,0 +1,9 @@ +provider "docker" { +} + +module "helloworld_01" { + source = "helloworld" + version = "v1.0.0" + image_version = "v1.0.0" + external_port = 8080 +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_02.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_02.tf new file mode 100644 index 0000000000..701f080667 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/multi_modules_with_multi_files/helloworld_02.tf @@ -0,0 +1,9 @@ +provider "docker" { +} + +module "helloworld_02" { + source = "helloworld" + version = "v0.9.0" + image_version = "v0.9.0" + external_port = 8081 +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/helloworld/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/helloworld/main.tf new file mode 100644 index 0000000000..6dadf55b5b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/helloworld/main.tf @@ -0,0 +1,19 @@ +resource "docker_container" "helloworld" { + name = "gcr.io/pipecd/helloworld:${var.image_version}" + ports { + internal = 9376 + external = "${var.external_port}" + } +} + +variable "external_port" { + default = 80 +} + +variable 
"image_version" { + default = "latest" +} + +output "container" { + value = docker_image.helloworld +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/main.tf new file mode 100644 index 0000000000..17210fe9a0 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module/main.tf @@ -0,0 +1,9 @@ +provider "docker" { +} + +module "helloworld" { + source = "helloworld" + version = "v1.0.0" + image_version = "v1.0.0" + external_port = 8080 +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/helloworld/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/helloworld/main.tf new file mode 100644 index 0000000000..6dadf55b5b --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/helloworld/main.tf @@ -0,0 +1,19 @@ +resource "docker_container" "helloworld" { + name = "gcr.io/pipecd/helloworld:${var.image_version}" + ports { + internal = 9376 + external = "${var.external_port}" + } +} + +variable "external_port" { + default = 80 +} + +variable "image_version" { + default = "latest" +} + +output "container" { + value = docker_image.helloworld +} diff --git a/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/main.tf b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/main.tf new file mode 100644 index 0000000000..2ab0a5fa49 --- /dev/null +++ b/pkg/app/pipedv1/platformprovider/terraform/testdata/single_module_optional/main.tf @@ -0,0 +1,8 @@ +provider "docker" { +} + +module "helloworld" { + source = "helloworld" + image_version = "v1.0.0" + external_port = 8080 +} diff --git a/pkg/app/pipedv1/plugin/platform/kubernetes/planner/pipeline.go b/pkg/app/pipedv1/plugin/platform/kubernetes/planner/pipeline.go index cd95743cb2..78e56c2ebf 100644 --- 
a/pkg/app/pipedv1/plugin/platform/kubernetes/planner/pipeline.go +++ b/pkg/app/pipedv1/plugin/platform/kubernetes/planner/pipeline.go @@ -15,50 +15,19 @@ package planner import ( + "encoding/json" "fmt" - "strings" "time" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/planner" "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/model" ) -const ( - PredefinedStageK8sSync = "K8sSync" - PredefinedStageRollback = "Rollback" -) - -var predefinedStages = map[string]config.PipelineStage{ - PredefinedStageK8sSync: { - ID: PredefinedStageK8sSync, - Name: model.StageK8sSync, - Desc: "Sync by applying all manifests", - }, -} - -// GetPredefinedStage finds and returns the predefined stage for the given id. -func GetPredefinedStage(id string) (config.PipelineStage, bool) { - stage, ok := predefinedStages[id] - return stage, ok -} - -// MakeInitialStageMetadata makes the initial metadata for the given state configuration. -func MakeInitialStageMetadata(cfg config.PipelineStage) map[string]string { - switch cfg.Name { - case model.StageWaitApproval: - return map[string]string{ - "Approvers": strings.Join(cfg.WaitApprovalStageOptions.Approvers, ","), - } - default: - return nil - } -} - - func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineStage { var ( preStageID = "" - stage, _ = GetPredefinedStage(PredefinedStageK8sSync) + stage, _ = planner.GetPredefinedStage(planner.PredefinedStageK8sSync) stages = []config.PipelineStage{stage} out = make([]*model.PipelineStage, 0, len(stages)) ) @@ -76,7 +45,7 @@ func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineS Predefined: true, Visible: true, Status: model.StageStatus_STAGE_NOT_STARTED_YET, - Metadata: MakeInitialStageMetadata(s), + Metadata: planner.MakeInitialStageMetadata(s), CreatedAt: now.Unix(), UpdatedAt: now.Unix(), } @@ -88,7 +57,7 @@ func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineS } if autoRollback { - s, _ := 
GetPredefinedStage(PredefinedStageRollback) + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) out = append(out, &model.PipelineStage{ Id: s.ID, Name: s.Name.String(), @@ -103,3 +72,75 @@ func buildQuickSyncPipeline(autoRollback bool, now time.Time) []*model.PipelineS return out } + +func buildProgressivePipeline(pp *config.DeploymentPipeline, autoRollback bool, now time.Time) []*model.PipelineStage { + var ( + preStageID = "" + out = make([]*model.PipelineStage, 0, len(pp.Stages)) + ) + + for i, s := range pp.Stages { + id := s.ID + if id == "" { + id = fmt.Sprintf("stage-%d", i) + } + stage := &model.PipelineStage{ + Id: id, + Name: s.Name.String(), + Desc: s.Desc, + Index: int32(i), + Predefined: false, + Visible: true, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: planner.MakeInitialStageMetadata(s), + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + } + if preStageID != "" { + stage.Requires = []string{preStageID} + } + preStageID = id + out = append(out, stage) + } + + if autoRollback { + s, _ := planner.GetPredefinedStage(planner.PredefinedStageRollback) + out = append(out, &model.PipelineStage{ + Id: s.ID, + Name: s.Name.String(), + Desc: s.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + + // Add a stage for rolling back script run stages. + for i, s := range pp.Stages { + if s.Name == model.StageScriptRun { + // Use metadata as a way to pass parameters to the stage. 
+ envStr, _ := json.Marshal(s.ScriptRunStageOptions.Env) + metadata := map[string]string{ + "baseStageID": out[i].Id, + "onRollback": s.ScriptRunStageOptions.OnRollback, + "env": string(envStr), + } + ss, _ := planner.GetPredefinedStage(planner.PredefinedStageScriptRunRollback) + out = append(out, &model.PipelineStage{ + Id: ss.ID, + Name: ss.Name.String(), + Desc: ss.Desc, + Predefined: true, + Visible: false, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Metadata: metadata, + CreatedAt: now.Unix(), + UpdatedAt: now.Unix(), + }) + } + } + } + + return out +} diff --git a/pkg/app/pipedv1/toolregistry/install.go b/pkg/app/pipedv1/toolregistry/install.go new file mode 100644 index 0000000000..b2acb36abf --- /dev/null +++ b/pkg/app/pipedv1/toolregistry/install.go @@ -0,0 +1,228 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package toolregistry + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "text/template" + + "go.uber.org/zap" +) + +const ( + defaultKubectlVersion = "1.18.2" + defaultKustomizeVersion = "3.8.1" + defaultHelmVersion = "3.8.2" + defaultTerraformVersion = "0.13.0" +) + +var ( + kubectlInstallScriptTmpl = template.Must(template.New("kubectl").Parse(kubectlInstallScript)) + kustomizeInstallScriptTmpl = template.Must(template.New("kustomize").Parse(kustomizeInstallScript)) + helmInstallScriptTmpl = template.Must(template.New("helm").Parse(helmInstallScript)) + terraformInstallScriptTmpl = template.Must(template.New("terraform").Parse(terraformInstallScript)) +) + +func (r *registry) installKubectl(ctx context.Context, version string) error { + workingDir, err := os.MkdirTemp("", "kubectl-install") + if err != nil { + return err + } + defer os.RemoveAll(workingDir) + + asDefault := version == "" + if asDefault { + version = defaultKubectlVersion + } + + var ( + buf bytes.Buffer + data = map[string]interface{}{ + "WorkingDir": workingDir, + "Version": version, + "BinDir": r.binDir, + "AsDefault": asDefault, + } + ) + if err := kubectlInstallScriptTmpl.Execute(&buf, data); err != nil { + r.logger.Error("failed to render kubectl install script", + zap.String("version", version), + zap.Error(err), + ) + return fmt.Errorf("failed to install kubectl %s (%v)", version, err) + } + + var ( + script = buf.String() + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", script) + ) + if out, err := cmd.CombinedOutput(); err != nil { + r.logger.Error("failed to install kubectl", + zap.String("version", version), + zap.String("script", script), + zap.String("out", string(out)), + zap.Error(err), + ) + return fmt.Errorf("failed to install kubectl %s (%v)", version, err) + } + + r.logger.Info("just installed kubectl", zap.String("version", version)) + return nil +} + +func (r *registry) installKustomize(ctx context.Context, version string) error { + workingDir, err := 
os.MkdirTemp("", "kustomize-install") + if err != nil { + return err + } + defer os.RemoveAll(workingDir) + + asDefault := version == "" + if asDefault { + version = defaultKustomizeVersion + } + + var ( + buf bytes.Buffer + data = map[string]interface{}{ + "WorkingDir": workingDir, + "Version": version, + "BinDir": r.binDir, + "AsDefault": asDefault, + } + ) + if err := kustomizeInstallScriptTmpl.Execute(&buf, data); err != nil { + r.logger.Error("failed to render kustomize install script", + zap.String("version", version), + zap.Error(err), + ) + return fmt.Errorf("failed to install kustomize %s (%v)", version, err) + } + + var ( + script = buf.String() + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", script) + ) + if out, err := cmd.CombinedOutput(); err != nil { + r.logger.Error("failed to install kustomize", + zap.String("version", version), + zap.String("script", script), + zap.String("out", string(out)), + zap.Error(err), + ) + return fmt.Errorf("failed to install kustomize %s (%v)", version, err) + } + + r.logger.Info("just installed kustomize", zap.String("version", version)) + return nil +} + +func (r *registry) installHelm(ctx context.Context, version string) error { + workingDir, err := os.MkdirTemp("", "helm-install") + if err != nil { + return err + } + defer os.RemoveAll(workingDir) + + asDefault := version == "" + if asDefault { + version = defaultHelmVersion + } + + var ( + buf bytes.Buffer + data = map[string]interface{}{ + "WorkingDir": workingDir, + "Version": version, + "BinDir": r.binDir, + "AsDefault": asDefault, + } + ) + if err := helmInstallScriptTmpl.Execute(&buf, data); err != nil { + r.logger.Error("failed to render helm install script", + zap.String("version", version), + zap.Error(err), + ) + return fmt.Errorf("failed to install helm %s (%v)", version, err) + } + + var ( + script = buf.String() + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", script) + ) + if out, err := cmd.CombinedOutput(); err != nil { + r.logger.Error("failed to 
install helm", + zap.String("version", version), + zap.String("script", script), + zap.String("out", string(out)), + zap.Error(err), + ) + return fmt.Errorf("failed to install helm %s (%v)", version, err) + } + + r.logger.Info("just installed helm", zap.String("version", version)) + return nil +} + +func (r *registry) installTerraform(ctx context.Context, version string) error { + workingDir, err := os.MkdirTemp("", "terraform-install") + if err != nil { + return err + } + defer os.RemoveAll(workingDir) + + asDefault := version == "" + if asDefault { + version = defaultTerraformVersion + } + + var ( + buf bytes.Buffer + data = map[string]interface{}{ + "WorkingDir": workingDir, + "Version": version, + "BinDir": r.binDir, + "AsDefault": asDefault, + } + ) + if err := terraformInstallScriptTmpl.Execute(&buf, data); err != nil { + r.logger.Error("failed to render terraform install script", + zap.String("version", version), + zap.Error(err), + ) + return fmt.Errorf("failed to install terraform %s (%w)", version, err) + } + + var ( + script = buf.String() + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", script) + ) + if out, err := cmd.CombinedOutput(); err != nil { + r.logger.Error("failed to install terraform", + zap.String("version", version), + zap.String("script", script), + zap.String("out", string(out)), + zap.Error(err), + ) + return fmt.Errorf("failed to install terraform %s, %s (%w)", version, string(out), err) + } + + r.logger.Info("just installed terraform", zap.String("version", version)) + return nil +} diff --git a/pkg/app/pipedv1/toolregistry/registry.go b/pkg/app/pipedv1/toolregistry/registry.go new file mode 100644 index 0000000000..7e6d7d53c0 --- /dev/null +++ b/pkg/app/pipedv1/toolregistry/registry.go @@ -0,0 +1,219 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package toolregistry installs and manages the needed tools +// such as kubectl, helm... for executing tasks in pipeline. +package toolregistry + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + + "go.uber.org/zap" + "golang.org/x/sync/singleflight" +) + +// Registry provides functions to get path to the needed tools. +type Registry interface { + Kubectl(ctx context.Context, version string) (string, bool, error) + Kustomize(ctx context.Context, version string) (string, bool, error) + Helm(ctx context.Context, version string) (string, bool, error) + Terraform(ctx context.Context, version string) (string, bool, error) +} + +var defaultRegistry *registry + +// DefaultRegistry returns the shared registry. +func DefaultRegistry() Registry { + return defaultRegistry +} + +// InitDefaultRegistry initializes the default registry. +// This also preloads the pre-installed tools in the binDir. 
+func InitDefaultRegistry(binDir string, logger *zap.Logger) error { + logger = logger.Named("tool-registry") + if err := os.MkdirAll(binDir, os.ModePerm); err != nil { + return err + } + + tools, err := loadPreinstalledTool(binDir) + if err != nil { + return err + } + logger.Info("successfully loaded the pre-installed tools", zap.Any("tools", tools)) + + defaultRegistry = ®istry{ + binDir: binDir, + versions: tools, + installGroup: &singleflight.Group{}, + logger: logger, + } + + return nil +} + +func loadPreinstalledTool(binDir string) (map[string]struct{}, error) { + tools := make(map[string]struct{}) + err := filepath.Walk(binDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == binDir { + return nil + } + if info.IsDir() { + return filepath.SkipDir + } + if !info.Mode().IsRegular() { + return nil + } + name := filepath.Base(path) + tools[name] = struct{}{} + return nil + }) + if err != nil { + return nil, err + } + return tools, nil +} + +const ( + kubectlPrefix = "kubectl" + kustomizePrefix = "kustomize" + helmPrefix = "helm" + terraformPrefix = "terraform" +) + +type registry struct { + binDir string + versions map[string]struct{} + mu sync.RWMutex + installGroup *singleflight.Group + logger *zap.Logger +} + +func (r *registry) Kubectl(ctx context.Context, version string) (string, bool, error) { + name := kubectlPrefix + if version != "" { + name = fmt.Sprintf("%s-%s", kubectlPrefix, version) + } + path := filepath.Join(r.binDir, name) + + r.mu.RLock() + _, ok := r.versions[name] + r.mu.RUnlock() + if ok { + return path, false, nil + } + + _, err, _ := r.installGroup.Do(name, func() (interface{}, error) { + return nil, r.installKubectl(ctx, version) + }) + if err != nil { + return "", true, err + } + + r.mu.Lock() + r.versions[name] = struct{}{} + r.mu.Unlock() + + return path, true, nil +} + +func (r *registry) Kustomize(ctx context.Context, version string) (string, bool, error) { + name := 
kustomizePrefix + if version != "" { + name = fmt.Sprintf("%s-%s", kustomizePrefix, version) + } + path := filepath.Join(r.binDir, name) + + r.mu.RLock() + _, ok := r.versions[name] + r.mu.RUnlock() + if ok { + return path, false, nil + } + + _, err, _ := r.installGroup.Do(name, func() (interface{}, error) { + return nil, r.installKustomize(ctx, version) + }) + if err != nil { + return "", true, err + } + + r.mu.Lock() + r.versions[name] = struct{}{} + r.mu.Unlock() + + return path, true, nil +} + +func (r *registry) Helm(ctx context.Context, version string) (string, bool, error) { + name := helmPrefix + if version != "" { + name = fmt.Sprintf("%s-%s", helmPrefix, version) + } + path := filepath.Join(r.binDir, name) + + r.mu.RLock() + _, ok := r.versions[name] + r.mu.RUnlock() + if ok { + return path, false, nil + } + + _, err, _ := r.installGroup.Do(name, func() (interface{}, error) { + return nil, r.installHelm(ctx, version) + }) + if err != nil { + return "", true, err + } + + r.mu.Lock() + r.versions[name] = struct{}{} + r.mu.Unlock() + + return path, true, nil +} + +func (r *registry) Terraform(ctx context.Context, version string) (string, bool, error) { + name := terraformPrefix + if version != "" { + name = fmt.Sprintf("%s-%s", terraformPrefix, version) + } + path := filepath.Join(r.binDir, name) + + r.mu.RLock() + _, ok := r.versions[name] + r.mu.RUnlock() + if ok { + return path, false, nil + } + + _, err, _ := r.installGroup.Do(name, func() (interface{}, error) { + return nil, r.installTerraform(ctx, version) + }) + if err != nil { + return "", true, err + } + + r.mu.Lock() + r.versions[name] = struct{}{} + r.mu.Unlock() + + return path, true, nil +} diff --git a/pkg/app/pipedv1/toolregistry/tool_darwin.go b/pkg/app/pipedv1/toolregistry/tool_darwin.go new file mode 100644 index 0000000000..044c90cecb --- /dev/null +++ b/pkg/app/pipedv1/toolregistry/tool_darwin.go @@ -0,0 +1,55 @@ +// Copyright 2024 The PipeCD Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package toolregistry + +var kubectlInstallScript = ` +cd {{ .WorkingDir }} +curl -LO https://storage.googleapis.com/kubernetes-release/release/v{{ .Version }}/bin/darwin/amd64/kubectl +mv kubectl {{ .BinDir }}/kubectl-{{ .Version }} +chmod +x {{ .BinDir }}/kubectl-{{ .Version }} +{{ if .AsDefault }} +cp -f {{ .BinDir }}/kubectl-{{ .Version }} {{ .BinDir }}/kubectl +{{ end }} +` + +var kustomizeInstallScript = ` +cd {{ .WorkingDir }} +curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v{{ .Version }}/kustomize_v{{ .Version }}_darwin_amd64.tar.gz | tar xvz +mv kustomize {{ .BinDir }}/kustomize-{{ .Version }} +chmod +x {{ .BinDir }}/kustomize-{{ .Version }} +{{ if .AsDefault }} +cp -f {{ .BinDir }}/kustomize-{{ .Version }} {{ .BinDir }}/kustomize +{{ end }} +` + +var helmInstallScript = ` +cd {{ .WorkingDir }} +curl -L https://get.helm.sh/helm-v{{ .Version }}-darwin-amd64.tar.gz | tar xvz +mv darwin-amd64/helm {{ .BinDir }}/helm-{{ .Version }} +chmod +x {{ .BinDir }}/helm-{{ .Version }} +{{ if .AsDefault }} +cp -f {{ .BinDir }}/helm-{{ .Version }} {{ .BinDir }}/helm +{{ end }} +` + +var terraformInstallScript = ` +cd {{ .WorkingDir }} +curl https://releases.hashicorp.com/terraform/{{ .Version }}/terraform_{{ .Version }}_darwin_amd64.zip -o terraform_{{ .Version }}_linux_amd64.zip +unzip terraform_{{ .Version }}_linux_amd64.zip +mv terraform {{ .BinDir }}/terraform-{{ .Version }} 
// linux install scripts, executed by installTool via /bin/sh.
// Template parameters: WorkingDir (scratch dir), Version, BinDir
// (destination directory), AsDefault (also expose the unversioned name).

// kubectlInstallScript downloads the requested kubectl release binary and
// places it in BinDir as kubectl-<Version>.
var kubectlInstallScript = `
cd {{ .WorkingDir }}
curl -LO https://storage.googleapis.com/kubernetes-release/release/v{{ .Version }}/bin/linux/amd64/kubectl
mv kubectl {{ .BinDir }}/kubectl-{{ .Version }}
chmod +x {{ .BinDir }}/kubectl-{{ .Version }}
{{ if .AsDefault }}
cp -f {{ .BinDir }}/kubectl-{{ .Version }} {{ .BinDir }}/kubectl
{{ end }}
`

// kustomizeInstallScript streams the kustomize release tarball through tar
// and installs the extracted binary as kustomize-<Version>.
var kustomizeInstallScript = `
cd {{ .WorkingDir }}
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v{{ .Version }}/kustomize_v{{ .Version }}_linux_amd64.tar.gz | tar xvz
mv kustomize {{ .BinDir }}/kustomize-{{ .Version }}
chmod +x {{ .BinDir }}/kustomize-{{ .Version }}
{{ if .AsDefault }}
cp -f {{ .BinDir }}/kustomize-{{ .Version }} {{ .BinDir }}/kustomize
{{ end }}
`

// helmInstallScript streams the helm release tarball through tar and
// installs linux-amd64/helm as helm-<Version>.
var helmInstallScript = `
cd {{ .WorkingDir }}
curl -L https://get.helm.sh/helm-v{{ .Version }}-linux-amd64.tar.gz | tar xvz
mv linux-amd64/helm {{ .BinDir }}/helm-{{ .Version }}
chmod +x {{ .BinDir }}/helm-{{ .Version }}
{{ if .AsDefault }}
cp -f {{ .BinDir }}/helm-{{ .Version }} {{ .BinDir }}/helm
{{ end }}
`

// terraformInstallScript downloads and unzips the terraform release and
// installs the binary as terraform-<Version>.
// NOTE(review): no chmod here — presumably unzip preserves the exec bit
// from the archive; confirm if installs ever produce a non-executable file.
var terraformInstallScript = `
cd {{ .WorkingDir }}
curl https://releases.hashicorp.com/terraform/{{ .Version }}/terraform_{{ .Version }}_linux_amd64.zip -o terraform_{{ .Version }}_linux_amd64.zip
unzip terraform_{{ .Version }}_linux_amd64.zip
mv terraform {{ .BinDir }}/terraform-{{ .Version }}
{{ if .AsDefault }}
cp -f {{ .BinDir }}/terraform-{{ .Version }} {{ .BinDir }}/terraform
{{ end }}
`