diff --git a/api/go.mod b/api/go.mod index ce4aef76c..2af43091b 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,25 +4,46 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 + // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as + // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying + // functions). + github.com/fluxcd/pkg/runtime v0.13.0-rc.6 k8s.io/apimachinery v0.23.1 sigs.k8s.io/controller-runtime v0.11.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.6 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/gomega v1.17.0 // indirect + github.com/pkg/errors v0.9.1 // indirect golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect + golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + k8s.io/api v0.23.0 // indirect + k8s.io/client-go v0.23.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index ffe31a8af..927fd8a67 100644 --- a/api/go.sum +++ b/api/go.sum @@ -68,6 +68,7 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -76,7 +77,9 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi 
v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -118,13 +121,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -148,6 +154,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -194,6 +201,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -236,6 +244,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -250,9 +259,11 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -270,6 +281,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -309,6 +321,7 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -359,6 +372,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -369,21 +383,25 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -466,15 +484,18 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4 go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -584,6 +605,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -663,6 +685,7 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -678,6 +701,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -743,6 +767,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -772,6 +797,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -852,6 +878,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -895,11 +922,13 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= 
+k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= @@ -908,6 +937,7 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 4df79c2e1..9b9d8e0d8 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -17,7 +17,8 @@ limitations under the License. package v1beta1 import ( - apimeta "k8s.io/apimachinery/pkg/api/meta" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" @@ -29,10 +30,17 @@ const ( BucketKind = "Bucket" ) +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" + AzureBlobProvider string = "azure" +) + // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). - // +kubebuilder:validation:Enum=generic;aws;gcp + // +kubebuilder:validation:Enum=generic;aws;gcp;azure // +kubebuilder:default:=generic // +optional Provider string `json:"provider,omitempty"` @@ -82,12 +90,6 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } -const ( - GenericBucketProvider string = "generic" - AmazonBucketProvider string = "aws" - GoogleBucketProvider string = "gcp" -) - // BucketStatus defines the observed state of a bucket type BucketStatus struct { // ObservedGeneration is the last observed generation. @@ -119,61 +121,38 @@ const ( BucketOperationFailedReason string = "BucketOperationFailed" ) -// BucketProgressing resets the conditions of the Bucket to metav1.Condition of -// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified Bucket. 
-func BucketProgressing(bucket Bucket) Bucket { - bucket.Status.ObservedGeneration = bucket.Generation - bucket.Status.URL = "" - bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") - return bucket +// GetConditions returns the status conditions of the object. +func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// BucketReady sets the given Artifact and URL on the Bucket and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified Bucket. -func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { - bucket.Status.Artifact = &artifact - bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) - return bucket +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions } -// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with -// the given reason and message. It returns the modified Bucket. -func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) - return bucket +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration } -// BucketReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func BucketReadyMessage(bucket Bucket) string { - if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *Bucket) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *Bucket) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *Bucket) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/condition_types.go b/api/v1beta1/condition_types.go index 4077a2ab6..1a9db62ad 100644 --- a/api/v1beta1/condition_types.go +++ b/api/v1beta1/condition_types.go @@ -18,6 +18,22 @@ package v1beta1 const SourceFinalizer = "finalizers.fluxcd.io" +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
+ ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" +) + const ( // URLInvalidReason represents the fact that a given source has an invalid URL. URLInvalidReason string = "URLInvalid" diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c1014e6b7..145ac6bb0 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -17,11 +17,14 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -34,6 +37,22 @@ const ( LibGit2Implementation = "libgit2" ) +const ( + // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + CheckoutFailedCondition string = "CheckoutFailed" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" +) + // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. @@ -42,10 +61,8 @@ type GitRepositorySpec struct { URL string `json:"url"` // The secret name containing the Git credentials. - // For HTTPS repositories the secret must contain username and password - // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -63,16 +80,16 @@ type GitRepositorySpec struct { // +optional Reference *GitRepositoryRef `json:"ref,omitempty"` - // Verify OpenPGP signature for the Git commit HEAD points to. + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. 
// +optional Verification *GitRepositoryVerification `json:"verify,omitempty"` - // Ignore overrides the set of excluded patterns in the .sourceignore format - // (which is the same as .gitignore). If not provided, a default will be used, - // consult the documentation for your version to find out what those are. + // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). + // If not provided, a default will be used, consult the documentation for your version to find out what those are. // +optional Ignore *string `json:"ignore,omitempty"` - // This flag tells the controller to suspend the reconciliation of this source. + // Suspend tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` @@ -84,13 +101,13 @@ type GitRepositorySpec struct { // +optional GitImplementation string `json:"gitImplementation,omitempty"` - // When enabled, after the clone is created, initializes all submodules within, - // using their default settings. + // When enabled, after the clone is created, initializes all submodules within, using their default settings. // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` - // Extra git repositories to map into the repository + // Include defines a list of GitRepository resources whose artifacts should be included in the artifact produced for + // this resource. Include []GitRepositoryInclude `json:"include,omitempty"` // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. @@ -144,11 +161,11 @@ type GitRepositoryRef struct { // GitRepositoryVerification defines the OpenPGP signature verification process. type GitRepositoryVerification struct { - // Mode describes what git object should be verified, currently ('head'). + // Mode describes what Git object should be verified, currently ('head'). // +kubebuilder:validation:Enum=head Mode string `json:"mode"` - // The secret name containing the public keys of all trusted Git authors. + // SecretRef containing the public keys of all trusted Git authors. SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` } @@ -162,8 +179,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository - // sync. + // URL is the download link for the artifact output of the last repository sync. // +optional URL string `json:"url,omitempty"` @@ -179,12 +195,10 @@ type GitRepositoryStatus struct { } const ( - // GitOperationSucceedReason represents the fact that the git clone, pull - // and checkout operations succeeded. + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. GitOperationSucceedReason string = "GitOperationSucceed" - // GitOperationFailedReason represents the fact that the git clone, pull or - // checkout operations failed. + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed.
GitOperationFailedReason string = "GitOperationFailed" ) @@ -196,7 +210,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -207,7 +221,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -215,7 +229,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -230,22 +244,38 @@ func GitRepositoryReadyMessage(repository GitRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *GitRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *GitRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 51c04781d..76ed4914b 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -17,11 +17,14 @@ limitations under the License. 
package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -152,7 +155,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return chart } @@ -162,7 +165,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) return chart } @@ -170,7 +173,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) return chart } @@ -185,22 +188,32 @@ func HelmChartReadyMessage(chart HelmChart) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { - return in.Status.Artifact +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// GetStatusConditions returns a pointer to the Status.Conditions slice -func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { - return &in.Status.Conditions +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration } -// GetInterval returns the interval at which the source is updated. -func (in *HelmChart) GetInterval() metav1.Duration { +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + // GetValuesFiles returns a merged list of ValuesFiles. func (in *HelmChart) GetValuesFiles() []string { valuesFiles := in.Spec.ValuesFiles @@ -212,6 +225,12 @@ func (in *HelmChart) GetValuesFiles() []string { return valuesFiles } +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 0af0d4cf6..cd585f2d0 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -17,11 +17,14 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -113,7 +116,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -123,7 +126,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -131,7 +134,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -146,22 +149,38 @@ func HelmRepositoryReadyMessage(repository HelmRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *HelmRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *HelmRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/source.go b/api/v1beta1/source.go index 5c10d00fc..1bf8bd975 100644 --- a/api/v1beta1/source.go +++ b/api/v1beta1/source.go @@ -17,7 +17,10 @@ limitations under the License. package v1beta1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) const ( @@ -29,9 +32,13 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + runtime.Object + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. GetArtifact() *Artifact // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. GetInterval() metav1.Duration } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index d5e4f4892..79c2bdbb7 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index f613db849..5ec9f6420 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 creationTimestamp: null name: buckets.source.toolkit.fluxcd.io spec: @@ -35,28 +35,42 @@ spec: description: Bucket is the Schema for the buckets API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: BucketSpec defines the desired state of an S3 compatible bucket + description: BucketSpec defines the desired state of an S3 compatible + bucket properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -70,7 +84,10 @@ spec: description: The bucket endpoint address. type: string ignore: - description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. type: string insecure: description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. @@ -85,21 +102,24 @@ spec: - generic - aws - gcp + - azure type: string region: description: The bucket region. type: string secretRef: - description: The name of the secret containing authentication credentials for the Bucket. + description: The name of the secret containing authentication credentials + for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean timeout: default: 20s @@ -116,20 +136,24 @@ spec: description: BucketStatus defines the observed state of a bucket properties: artifact: - description: Artifact represents the output of the last successful Bucket sync. + description: Artifact represents the output of the last successful + Bucket sync. 
properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -141,23 +165,45 @@ spec: conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -170,7 +216,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -183,14 +233,17 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer url: - description: URL is the download link for the artifact output of the last Bucket sync. + description: URL is the download link for the artifact output of the + last Bucket sync. type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 4f5de9a06..d4759dfc5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 creationTimestamp: null name: gitrepositories.source.toolkit.fluxcd.io spec: @@ -37,10 +37,14 @@ spec: description: GitRepository is the Schema for the gitrepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -48,17 +52,26 @@ spec: description: GitRepositorySpec defines the desired state of a Git repository. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -67,33 +80,41 @@ spec: type: object gitImplementation: default: go-git - description: Determines which git client library to use. Defaults to go-git, valid values are ('go-git', 'libgit2'). + description: Determines which git client library to use. Defaults + to go-git, valid values are ('go-git', 'libgit2'). enum: - go-git - libgit2 type: string ignore: - description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). 
If not provided, + a default will be used; consult the documentation for your version + to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: Include defines a list of GitRepository resources whose + artifacts should be included in the artifact produced for this resource. items: - description: GitRepositoryInclude defines a source with a from and to path. + description: GitRepositoryInclude defines a source with a from and + to path. properties: fromPath: - description: The path to copy contents from, defaults to the root directory. + description: The path to copy contents from, defaults to the + root directory. type: string repository: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object toPath: - description: The path to copy contents to, defaults to the name of the source ref. + description: The path to copy contents to, defaults to the name + of the source ref. type: string required: - repository @@ -103,57 +124,71 @@ spec: description: The interval at which to check for repository updates. type: string recurseSubmodules: - description: When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the 'go-git' GitImplementation. + description: When enabled, after the clone is created, initializes + all submodules within, using their default settings. This option + is available only when using the 'go-git' GitImplementation. type: boolean ref: - description: The Git reference to checkout and monitor for changes, defaults to master branch. + description: The Git reference to checkout and monitor for changes, + defaults to master branch. properties: branch: description: The Git branch to checkout, defaults to master. type: string commit: - description: The Git commit SHA to checkout, if specified Tag filters will be ignored. + description: The Git commit SHA to checkout, if specified Tag + filters will be ignored. type: string semver: - description: The Git tag semver expression, takes precedence over Tag. + description: The Git tag semver expression, takes precedence over + Tag. type: string tag: description: The Git tag to checkout, takes precedence over Branch. type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain identity, identity.pub and known_hosts fields. + description: The secret name containing the Git credentials. For HTTPS + repositories the secret must contain username and password fields. + For SSH repositories the secret must contain 'identity', 'identity.pub' + and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: Suspend tells the controller to suspend the reconciliation + of this source. type: boolean timeout: default: 20s - description: The timeout for remote Git operations like cloning, defaults to 20s. + description: The timeout for remote Git operations like cloning, defaults + to 20s.
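For reference alongside the schema above, a GitRepository carrying these spec fields (url, ref, secretRef, timeout, interval) might be constructed from the Go API types of this repository as follows. This is a minimal sketch, assuming the v1beta1 types under api/v1beta1; the newGitRepository helper is hypothetical:

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/apis/meta"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)

// newGitRepository sketches an object exercising the spec fields documented
// in the schema above.
func newGitRepository(name, namespace string) *sourcev1.GitRepository {
	return &sourcev1.GitRepository{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: sourcev1.GitRepositorySpec{
			URL:       "ssh://git@example.com/org/repo",
			Interval:  metav1.Duration{Duration: 5 * time.Minute},
			Timeout:   &metav1.Duration{Duration: 20 * time.Second},
			Reference: &sourcev1.GitRepositoryRef{Branch: "main"},
			// For SSH URLs, the referenced Secret is expected to carry the
			// 'identity', 'identity.pub' and 'known_hosts' fields.
			SecretRef: &meta.LocalObjectReference{Name: "repo-credentials"},
		},
	}
}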
type: string url: description: The repository URL, can be a HTTP/S or SSH address. pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points to. + description: Verification defines the configuration to verify the + OpenPGP signature for the Git commit HEAD points to. properties: mode: - description: Mode describes what git object should be verified, currently ('head'). + description: Mode describes what Git object should be verified, + currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all trusted Git authors. + description: SecretRef containing the public keys of all trusted + Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -171,20 +206,24 @@ spec: description: GitRepositoryStatus defines the observed state of a Git repository. properties: artifact: - description: Artifact represents the output of the last successful repository sync. + description: Artifact represents the output of the last successful + repository sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -196,23 +235,45 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -225,7 +286,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -238,7 +303,8 @@ spec: type: object type: array includedArtifacts: - description: IncludedArtifacts represents the included artifacts from the last successful repository sync. + description: IncludedArtifacts represents the included artifacts from + the last successful repository sync. items: description: Artifact represents the output of a source synchronisation. properties: @@ -246,14 +312,17 @@ spec: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -264,14 +333,17 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer url: - description: URL is the download link for the artifact output of the last repository sync. + description: URL is the download link for the artifact output of the + last repository sync. type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index b45e88211..5b7b10427 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 creationTimestamp: null name: helmcharts.source.toolkit.fluxcd.io spec: @@ -46,10 +46,14 @@ spec: description: HelmChart is the Schema for the helmcharts API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -57,17 +61,26 @@ spec: description: HelmChartSpec defines the desired state of a Helm chart. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -75,14 +88,18 @@ spec: - namespaceSelectors type: object chart: - description: The name or path the Helm chart is available at in the SourceRef. + description: The name or path the Helm chart is available at in the + SourceRef. type: string interval: description: The interval at which to check the Source for updates. type: string reconcileStrategy: default: ChartVersion - description: Determines what enables the creation of a new artifact. Valid values are ('ChartVersion', 'Revision'). See the documentation of the values for an explanation on their behavior. Defaults to ChartVersion when omitted. + description: Determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). See the documentation + of the values for an explanation on their behavior. Defaults to + ChartVersion when omitted. enum: - ChartVersion - Revision @@ -94,7 +111,8 @@ spec: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', 'GitRepository', 'Bucket'). 
+ description: Kind of the referent, valid values are ('HelmRepository', + 'GitRepository', 'Bucket'). enum: - HelmRepository - GitRepository @@ -108,19 +126,28 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean valuesFile: - description: Alternative values file to use as the default chart values, expected to be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, for backwards compatibility the file defined here is merged before the ValuesFiles items. Ignored when omitted. + description: Alternative values file to use as the default chart values, + expected to be a relative path in the SourceRef. Deprecated in favor + of ValuesFiles, for backwards compatibility the file defined here + is merged before the ValuesFiles items. Ignored when omitted. type: string valuesFiles: - description: Alternative list of values files to use as the chart values (values.yaml is not included by default), expected to be a relative path in the SourceRef. Values files are merged in the order of this list with the last file overriding the first. Ignored when omitted. + description: Alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be + a relative path in the SourceRef. Values files are merged in the + order of this list with the last file overriding the first. Ignored + when omitted. items: type: string type: array version: default: '*' - description: The chart version semver expression, ignored for charts from GitRepository and Bucket sources. Defaults to latest when omitted. + description: The chart version semver expression, ignored for charts + from GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -133,20 +160,24 @@ spec: description: HelmChartStatus defines the observed state of the HelmChart. properties: artifact: - description: Artifact represents the output of the last successful chart sync. + description: Artifact represents the output of the last successful + chart sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -158,23 +189,45 @@ spec: conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -187,7 +240,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -200,7 +257,9 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index bcce23a7f..f755152a2 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 creationTimestamp: null name: helmrepositories.source.toolkit.fluxcd.io spec: @@ -37,10 +37,14 @@ spec: description: HelmRepository is the Schema for the helmrepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -48,17 +52,26 @@ spec: description: HelmRepositorySpec defines the reference to a Helm repository. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. 
+ description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -69,26 +82,36 @@ spec: description: The interval at which to check the upstream for updates. type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef to be passed on to a host that does not match the host as defined in URL. This may be required if the host of the advertised chart URLs in the index differ from the defined URL. Enabling this should be done with caution, as it can potentially result in credentials getting stolen in a MITM-attack. + description: PassCredentials allows the credentials from the SecretRef + to be passed on to a host that does not match the host as defined + in URL. This may be required if the host of the advertised chart + URLs in the index differ from the defined URL. Enabling this should + be done with caution, as it can potentially result in credentials + getting stolen in a MITM-attack. type: boolean secretRef: - description: The name of the secret containing authentication credentials for the Helm repository. For HTTP/S basic auth the secret must contain username and password fields. For TLS the secret must contain a certFile and keyFile, and/or caCert fields. + description: The name of the secret containing authentication credentials + for the Helm repository. For HTTP/S basic auth the secret must contain + username and password fields. For TLS the secret must contain a + certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean timeout: default: 60s description: The timeout of index downloading, defaults to 60s. type: string url: - description: The Helm repository URL, a valid URL contains at least a protocol and host. + description: The Helm repository URL, a valid URL contains at least + a protocol and host. 
type: string required: - interval @@ -100,20 +123,24 @@ spec: description: HelmRepositoryStatus defines the observed state of the HelmRepository. properties: artifact: - description: Artifact represents the output of the last successful repository sync. + description: Artifact represents the output of the last successful + repository sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -125,23 +152,45 @@ spec: conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -154,7 +203,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -167,7 +220,9 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03c..fa21bd0a4 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} + // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 000000000..63ff81ced --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
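+//
+// A usage sketch in a Gomega-based test ('got' and 'want' are hypothetical
+// *sourcev1.Artifact values):
+//
+//	g := NewWithT(t)
+//	g.Expect(got).To(MatchArtifact(want))
+//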
+func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 959661615..935c93bf7 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, @@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a25587d1a..bd27a1ef2 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,40 +18,79 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" + "errors" "fmt" + "net/url" "os" "path/filepath" + "sort" "strings" "time" + gcpstorage "cloud.google.com/go/storage" + storagemgmt "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest/to" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/fluxcd/source-controller/pkg/azure/cloudprovider" + "github.com/fluxcd/source-controller/pkg/gcp" + "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/sourceignore" ) +// Status conditions owned by Bucket reconciler. +var bucketOwnedConditions = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var bucketReadyDeps = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. +var bucketReadyDepsNegative = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete @@ -60,17 +99,21 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + AzureCloudConfig string } type BucketReconcilerOptions struct { MaxConcurrentReconciles int } +// bucketReconcilerFunc is the function type for all the bucket reconciler +// functions. 
+type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) + func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } @@ -83,529 +126,983 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(bucket.DeepCopy()) - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &bucket, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Return early if the object is suspended. - if bucket.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } + var recResult sreconcile.Result - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } + // Always attempt to patch the object and status after each reconciliation + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective.
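+ // Because retErr is a named return value, an assignment to it inside the + // deferred function changes the error Reconcile ultimately returns to + // controller-runtime, while the ctrl.Result value passes through unchanged.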
+ defer func() { + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") - } + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) + // Add the finalizer first if it does not exist, to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return ctrl.Result{Requeue: true}, nil + } - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr + // Reconcile actual object + reconcilers := []bucketReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) +} - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. +func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.Bucket, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request, if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) } - r.recordReadiness(ctx, reconciledBucket) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetInterval().Duration.String(), - )) + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, bucketOwnedConditions) + + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + bucketReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + bucketReadyDepsNegative..., + ), + ) + + // Finally, patch the resource. + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted.
+		if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
+			err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) })
+		}
+		recErr = kerrors.NewAggregate([]error{recErr, err})
+	}

-	return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil
+	return recErr
 }

-func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) {
-	log := ctrl.LoggerFrom(ctx)
-	var err error
-	var sourceBucket sourcev1.Bucket
+// reconcile iterates through the sub-reconciliation tasks for the object; it
+// returns early on the first task that returns ResultRequeue or produces an
+// error.
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) {
+	if obj.Generation != obj.Status.ObservedGeneration {
+		conditions.MarkReconciling(obj, "NewGeneration", "Reconciling new generation %d", obj.Generation)
+	}
+
+	var artifact sourcev1.Artifact

-	tempDir, err := os.MkdirTemp("", bucket.Name)
+	// Create temp working dir
+	tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
 	if err != nil {
-		err = fmt.Errorf("tmp dir error: %w", err)
-		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to create temporary directory: %w", err),
+			Reason: sourcev1.StorageOperationFailedReason,
+		}
 	}
-	defer func() {
-		if err := os.RemoveAll(tempDir); err != nil {
-			log.Error(err, "failed to remove working directory", "path", tempDir)
+	defer os.RemoveAll(tmpDir)
+
+	// Run the sub-reconcilers and build the result of reconciliation.
+	var res sreconcile.Result
+	var resErr error
+	for _, rec := range reconcilers {
+		recResult, err := rec(ctx, obj, &artifact, tmpDir)
+		// Exit immediately on ResultRequeue.
+		if recResult == sreconcile.ResultRequeue {
+			return sreconcile.ResultRequeue, nil
 		}
-	}()
-
-	if bucket.Spec.Provider == sourcev1.GoogleBucketProvider {
-		sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir)
+		// If an error is received, prioritize the returned results because an
+		// error also means immediate requeue.
 		if err != nil {
-			return sourceBucket, err
-		}
-	} else {
-		sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir)
-		if err != nil {
-			return sourceBucket, err
+			resErr = err
+			res = recResult
+			break
 		}
+		// Prioritize requeue request in the result.
+		res = sreconcile.LowestRequeuingResult(res, recResult)
 	}

-	revision, err := r.checksum(tempDir)
-	if err != nil {
-		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	return res, resErr
+}
+
+// reconcileStorage ensures the current state of the storage matches the desired and previously observed state.
+//
+// All artifacts for the resource except for the current one are garbage collected from the storage.
+// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object.
+// If the hostname of the URLs on the object does not match the current storage server hostname, they are updated.
+func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "No artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.FetchFailedCondition=True and returns early. +func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + var secret *corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e } - return bucket, nil } - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) + case sourcev1.AzureBlobProvider: + return r.reconcileWithAzureBlob(ctx, obj, artifact, secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) } +} - // acquire lock - unlock, err := r.Storage.Lock(artifact) +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. 
In case of an error during the download process (including transient errors), it records
+// v1beta1.FetchFailedCondition=True and returns early.
+// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the calculated revision of the
+// upstream bucket to the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ.
+// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata.
+func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact,
+	secret *corev1.Secret, dir string) (sreconcile.Result, error) {
+	// Build the client with the configuration from the object and secret
+	s3Client, err := r.buildMinioClient(obj, secret)
 	if err != nil {
-		err = fmt.Errorf("unable to acquire lock: %w", err)
-		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+		e := &serror.Event{
+			Err:    fmt.Errorf("failed to construct S3 client: %w", err),
+			Reason: sourcev1.BucketOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+		// Return error as the contents of the secret may change
+		return sreconcile.ResultEmpty, e
 	}
-	defer unlock()

-	// archive artifact and check integrity
-	if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil {
-		err = fmt.Errorf("storage archive error: %w", err)
-		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	// Confirm bucket exists
+	ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
+	defer cancel()
+	exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName)
+	if err != nil {
+		e := &serror.Event{
+			Err:    fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err),
+			Reason: sourcev1.BucketOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+		return sreconcile.ResultEmpty, e
+	}
+	if !exists {
+		e := &serror.Event{
+			Err:    fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName),
+			Reason: sourcev1.BucketOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+		return sreconcile.ResultEmpty, e
 	}

-	// update latest symlink
-	url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
+	// Look for file with ignore rules first
+	path := filepath.Join(dir, sourceignore.IgnoreFile)
+	if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil {
+		if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" {
+			e := &serror.Event{
+				Err:    fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err),
+				Reason: sourcev1.BucketOperationFailedReason,
+			}
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+			return sreconcile.ResultEmpty, e
+		}
+	}
+	ps, err := sourceignore.ReadIgnoreFile(path, nil)
 	if err != nil {
-		err = fmt.Errorf("storage symlink error: %w", err)
-		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+		e := &serror.Event{
+			Err:    fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err),
+			Reason: sourcev1.BucketOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition,
sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } + // In-spec patterns take precedence + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) + } + matcher := sourceignore.NewMatcher(ps) - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil -} + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + // Ignore directories and the .sourceignore file + if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + continue + } + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { + continue + } + + index[object.Key] = object.ETag } - // Record deleted status - r.recordReadiness(ctx, bucket) + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } + } - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("download from bucket '%s' failed: %w", 
obj.Spec.BucketName, err),
+				Reason: sourcev1.BucketOperationFailedReason,
+			}
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+			return sreconcile.ResultEmpty, e
+		}
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
+			"downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
 	}
+	conditions.Delete(obj, sourcev1.FetchFailedCondition)

-	// Stop reconciliation as the object is being deleted
-	return ctrl.Result{}, nil
+	// Create potential new artifact
+	*artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
+	return sreconcile.ResultSuccess, nil
 }

-// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket
-// using a gcp client
-func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) {
-	log := ctrl.LoggerFrom(ctx)
-	gcpClient, err := r.authGCP(ctx, bucket)
+// reconcileWithAzureBlob ensures the upstream Azure Storage Account Blob container can be reached and downloaded
+// from using the declared configuration, and observes its state.
+//
+// The blob contents are downloaded to the given dir using the defined configuration, while taking ignore rules into
+// account. In case of an error during the download process (including transient errors), it records
+// v1beta1.FetchFailedCondition=True and returns early.
+// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the calculated revision of the
+// upstream container to the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ.
+// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata.
+//
+// The caller should assume a failure if an error is returned, or the Result is zero.
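+//
+// For illustration, the container URL used by the client is derived by joining the endpoint and bucket name; the
+// values below are hypothetical (see buildAzureClient for the exact construction):
+//
+//	spec.endpoint:   https://example.blob.core.windows.net
+//	spec.bucketName: source
+//	container URL:   https://example.blob.core.windows.net/source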
+func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (sreconcile.Result, error) { + // Build the client with the configuration from the object and secret + blobContainer, err := r.buildAzureClient(ctx, obj, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to construct Azure Blob client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + // Return error as the contents of the secret may change + return sreconcile.ResultEmpty, e } - defer gcpClient.Close(log) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + // Confirm bucket exists + _, err = blobContainer.GetProperties(ctxTimeout, azblob.LeaseAccessConditions{}) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + if resp, ok := err.(azblob.StorageError); ok && resp.ServiceCode() == "ContainerNotFound" { + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + downloadBlobToLocalFile := func(ctx context.Context, containerURL azblob.ContainerURL, blobName, localPath string) error { + blobURL := containerURL.NewBlobURL(blobName) + + blobProps, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if err != nil { + return err + } + + content := make([]byte, blobProps.ContentLength()) + err = azblob.DownloadBlobToBuffer(ctx, blobURL, 0, 0, content, azblob.DownloadFromBlobOptions{}) + if err != nil { + return err + } + + dir := filepath.Dir(localPath) + err = os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create local directory '%s': %w", dir, err) + } + + err = os.WriteFile(localPath, content, 0600) + return err } - // Look for file with ignore rules first. 
- path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := downloadBlobToLocalFile(ctxTimeout, *blobContainer, sourceignore.IgnoreFile, path); err != nil { + if resp, ok := err.(azblob.StorageError); ok && resp.ServiceCode() != "BlobNotFound" { + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + // download blob content + marker := azblob.Marker{} for { - object, err := objects.Next() - if err == gcp.IteratorDone { + resp, err := blobContainer.ListBlobsFlatSegment(ctxTimeout, marker, azblob.ListBlobsSegmentOptions{ + Prefix: "", + }) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + if len(resp.Segment.BlobItems) == 0 { break } - if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + for i := range resp.Segment.BlobItems { + object := resp.Segment.BlobItems[i] + // Ignore directories and the .sourceignore file + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + continue + } + // Ignore matches + if matcher.Match(strings.Split(object.Name, "/"), false) { + continue + } + + index[object.Name] = string(object.Properties.Etag) } - if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { - continue + if resp.Marker == nil { + break } - if matcher.Match(strings.Split(object.Name, "/"), false) { - continue + marker.Val = resp.Marker + 
}

+	// Calculate revision checksum from the collected index values
+	revision, err := index.Revision()
+	if err != nil {
+		ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision")
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to calculate revision: %w", err),
+			Reason: meta.FailedReason,
 		}
+	}

-		localPath := filepath.Join(tempDir, object.Name)
-		if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil {
-			err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err)
-			return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
+	if !obj.GetArtifact().HasRevision(revision) {
+		// Mark observations about the revision on the object
+		message := fmt.Sprintf("new upstream revision '%s'", revision)
+		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
+		conditions.MarkReconciling(obj, "NewRevision", message)
+
+		// Download the files in parallel, but with a limited number of workers
+		group, groupCtx := errgroup.WithContext(ctxTimeout)
+		group.Go(func() error {
+			const workers = 4
+			sem := semaphore.NewWeighted(workers)
+			for key := range index {
+				k := key
+				if err := sem.Acquire(groupCtx, 1); err != nil {
+					return err
+				}
+				group.Go(func() error {
+					defer sem.Release(1)
+					localPath := filepath.Join(dir, k)
+					if err := downloadBlobToLocalFile(ctxTimeout, *blobContainer, k, localPath); err != nil {
+						blobURL := blobContainer.URL()
+						return fmt.Errorf("failed to get '%s' file from blob container '%s': %w", k, blobURL.String(), err)
+					}
+					return nil
+				})
+			}
+			return nil
+		})
+		if err = group.Wait(); err != nil {
+			e := &serror.Event{
+				Err:    fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err),
+				Reason: sourcev1.BucketOperationFailedReason,
+			}
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+			return sreconcile.ResultEmpty, e
 		}
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
+			"downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
 	}
-	return sourcev1.Bucket{}, nil
+	conditions.Delete(obj, sourcev1.FetchFailedCondition)
+
+	// Create potential new artifact
+	*artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
+	return sreconcile.ResultSuccess, nil
 }

-// reconcileWithMinio handles getting objects from an S3 compatible bucket
-// using a minio client
-func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) {
-	s3Client, err := r.authMinio(ctx, bucket)
+// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the
+// declared configuration, and observes its state.
+//
+// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into
+// account. In case of an error during the download process (including transient errors), it records
+// v1beta1.FetchFailedCondition=True and returns early.
+// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the calculated revision of the
+// upstream bucket to the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ.
+// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata.
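+//
+// For illustration, when a Secret is referenced it is expected to carry a 'serviceaccount' key with the JSON
+// credentials, as validated by gcp.ValidateSecret in buildGCPClient; the shape below is hypothetical:
+//
+//	data:
+//	  serviceaccount: <JSON service account credentials>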
+func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (sreconcile.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to construct GCP client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + // Return error as the contents of the secret may change + return sreconcile.ResultEmpty, e } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. 
- path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } matcher := sourceignore.NewMatcher(ps) - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) + for { + object, err := objects.Next() + if err != nil { + if err == gcp.IteratorDone { + break + } + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { continue } - if matcher.Match(strings.Split(object.Key, "/"), false) { + if matcher.Match(strings.Split(object.Name, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + 
index[object.Name] = object.Etag
+	}
+
+	// Calculate revision checksum from the collected index values
+	revision, err := index.Revision()
+	if err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to calculate revision: %w", err),
+			Reason: meta.FailedReason,
+		}
+	}
+
+	if !obj.GetArtifact().HasRevision(revision) {
+		// Mark observations about the revision on the object
+		message := fmt.Sprintf("new upstream revision '%s'", revision)
+		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
+		conditions.MarkReconciling(obj, "NewRevision", message)
+
+		// Download the files in parallel, but with a limited number of workers
+		group, groupCtx := errgroup.WithContext(ctx)
+		group.Go(func() error {
+			const workers = 4
+			sem := semaphore.NewWeighted(workers)
+			for key := range index {
+				k := key
+				if err := sem.Acquire(groupCtx, 1); err != nil {
+					return err
+				}
+				group.Go(func() error {
+					defer sem.Release(1)
+					localPath := filepath.Join(dir, k)
+					if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil {
+						return fmt.Errorf("failed to get '%s' file: %w", k, err)
+					}
+					return nil
+				})
+			}
+			return nil
+		})
+		if err = group.Wait(); err != nil {
+			e := &serror.Event{
+				Err:    fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err),
+				Reason: sourcev1.BucketOperationFailedReason,
+			}
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+			return sreconcile.ResultEmpty, e
 		}
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
+			"downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
 	}
-	return sourcev1.Bucket{}, nil
+	conditions.Delete(obj, sourcev1.FetchFailedCondition)
+
+	// Create potential new artifact
+	*artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
+	return sreconcile.ResultSuccess, nil
 }

-// authGCP creates a new Google Cloud Platform storage client
-// to interact with the storage service.
-func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) {
-	var client *gcp.GCPClient
-	var err error
-	if bucket.Spec.SecretRef != nil {
-		secretName := types.NamespacedName{
-			Namespace: bucket.GetNamespace(),
-			Name:      bucket.Spec.SecretRef.Name,
+// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match
+// the given data.
+//
+// The inspection of the given data to the object is deferred, ensuring any stale observations are always removed.
+// If the given artifact does not differ from the object's current, it returns early.
+// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is
+// updated to its path.
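+//
+// For illustration, the archived artifact is stored under the storage layout (path elements are hypothetical):
+//
+//	<kind>/<namespace>/<name>/<revision>.tar.gz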
+func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
+	// Always restore the Ready condition in case it got removed due to a transient error
+	defer func() {
+		if obj.GetArtifact().HasRevision(artifact.Revision) {
+			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
+			conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason,
+				"stored artifact for revision '%s'", artifact.Revision)
 		}
+	}()

-		var secret corev1.Secret
-		if err := r.Get(ctx, secretName, &secret); err != nil {
-			return nil, fmt.Errorf("credentials secret error: %w", err)
+	// The artifact is up-to-date
+	if obj.GetArtifact().HasRevision(artifact.Revision) {
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision)
+		return sreconcile.ResultSuccess, nil
+	}
+
+	// Mark reconciling because the artifact and remote source are different,
+	// and they have to be reconciled.
+	conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision)
+
+	// Ensure target path exists and is a directory
+	if f, err := os.Stat(dir); err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to stat source path: %w", err),
+			Reason: sourcev1.StorageOperationFailedReason,
 		}
-		if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil {
-			return nil, err
+	} else if !f.IsDir() {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("source path '%s' is not a directory", dir),
+			Reason: sourcev1.StorageOperationFailedReason,
 		}
-		client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"]))
-		if err != nil {
-			return nil, err
+	}
+
+	// Ensure artifact directory exists and acquire lock
+	if err := r.Storage.MkdirAll(*artifact); err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to create artifact directory: %w", err),
+			Reason: sourcev1.StorageOperationFailedReason,
 		}
-	} else {
-		client, err = gcp.NewClient(ctx)
-		if err != nil {
-			return nil, err
+	}
+	unlock, err := r.Storage.Lock(*artifact)
+	if err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to acquire lock for artifact: %w", err),
+			Reason: meta.FailedReason,
 		}
 	}
-	return client, nil
+	defer unlock()
+
+	// Archive directory to storage
+	if err := r.Storage.Archive(artifact, dir, nil); err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("unable to archive artifact to storage: %w", err),
+			Reason: sourcev1.StorageOperationFailedReason,
+		}
+	}
+	r.AnnotatedEventf(obj, map[string]string{
+		"revision": artifact.Revision,
+		"checksum": artifact.Checksum,
+	}, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision)
+	// Record it on the object
+	obj.Status.Artifact = artifact.DeepCopy()
+
+	// Update symlink on a "best effort" basis
+	url, err := r.Storage.Symlink(*artifact, "latest.tar.gz")
+	if err != nil {
+		r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason,
+			"failed to update status URL symlink: %s", err)
+	}
+	if url != "" {
+		obj.Status.URL = url
+	}
+	return sreconcile.ResultSuccess, nil
 }

-// authMinio creates a new Minio client to interact with S3
-// compatible storage services.
-func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) {
-	opt := minio.Options{
-		Region: bucket.Spec.Region,
-		Secure: !bucket.Spec.Insecure,
+// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from
+// the artifact storage; if successful, the finalizer is removed from the object.
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) {
+	// Garbage collect the resource's artifacts
+	if err := r.garbageCollect(ctx, obj); err != nil {
+		// Return the error so we retry the failed garbage collection
+		return sreconcile.ResultEmpty, err
 	}
-	if bucket.Spec.SecretRef != nil {
-		secretName := types.NamespacedName{
-			Namespace: bucket.GetNamespace(),
-			Name:      bucket.Spec.SecretRef.Name,
-		}
+	// Remove our finalizer from the list
+	controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
+
+	// Stop reconciliation as the object is being deleted
+	return sreconcile.ResultEmpty, nil
+}

-		var secret corev1.Secret
-		if err := r.Get(ctx, secretName, &secret); err != nil {
-			return nil, fmt.Errorf("credentials secret error: %w", err)
+// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current
+// artifact, unless the deletion timestamp is set, in which case all artifacts for the resource are removed.
+func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
+	if !obj.DeletionTimestamp.IsZero() {
+		if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection for deleted resource failed: %s", err),
+				Reason: "GarbageCollectionFailed",
+			}
 		}
+		obj.Status.Artifact = nil
+		// TODO(hidde): we should only push this event if we actually garbage collected something
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded",
+			"garbage collected artifacts for deleted resource")
+		return nil
+	}
+	if obj.GetArtifact() != nil {
+		if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil {
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection of old artifacts failed: %s", err),
+				Reason: "GarbageCollectionFailed",
+			}
+		}
+		// TODO(hidde): we should only push this event if we actually garbage collected something
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts")
+	}
+	return nil
+}

-		accesskey := ""
-		secretkey := ""
+// buildMinioClient constructs a minio.Client with the data from the given object and Secret.
+// It returns an error if the Secret does not have the required fields.
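+//
+// For illustration, a minimal Secret shape accepted by this function (field values are hypothetical):
+//
+//	data:
+//	  accesskey: <access key ID>
+//	  secretkey: <secret access key>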
+func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, + } + if secret != nil { + var accessKey, secretKey string if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) + accessKey = string(k) } if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) + secretKey = string(k) } - if accesskey == "" || secretkey == "" { + if accessKey == "" || secretKey == "" { return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") } + return minio.New(obj.Spec.Endpoint, &opts) +} - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") +// buildAzureClient constructs an azblob.ContainerURL object with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. +func (r *BucketReconciler) buildAzureClient(ctx context.Context, obj *sourcev1.Bucket, secret *corev1.Secret) (*azblob.ContainerURL, error) { + ep := strings.TrimRight(obj.Spec.Endpoint, "/") + blobContainerURLString := strings.Join([]string{ep, obj.Spec.BucketName}, "/") + endpointURL, err := url.Parse(blobContainerURLString) + if err != nil { + return nil, fmt.Errorf("invalid endpoint: '%s': %w", obj.Spec.Endpoint, err) } + accountName := strings.Split(endpointURL.Hostname(), ".")[0] - return minio.New(bucket.Spec.Endpoint, &opt) -} - -// checksum calculates the SHA1 checksum of the given root directory. -// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. -func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err + var key string + var resourceId string + if secret != nil { + if k, ok := secret.Data["accesskey"]; ok { + resourceId = string(k) } - if !info.Mode().IsRegular() { - return nil + if k, ok := secret.Data["secretkey"]; ok { + key = string(k) } - data, err := os.ReadFile(path) + } + + if key == "" && resourceId != "" { + cloudProvider, err := cloudprovider.NewCloudProvider(r.AzureCloudConfig) if err != nil { - return err + return nil, fmt.Errorf("unable to load azure cloud config '%s': %w", r.AzureCloudConfig, err) } - relPath, err := filepath.Rel(root, path) + + tokens := strings.Split(resourceId, "/") + subscriptionId := tokens[2] + rg := tokens[4] + client := storagemgmt.NewAccountsClient(subscriptionId) + client.Authorizer = cloudProvider.Authorizer + + keys, err := client.ListKeys(ctx, rg, accountName, "") if err != nil { - return err + return nil, fmt.Errorf("unable to list keys for storage account '%s': %w", resourceId, err) } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err - } - return fmt.Sprintf("%x", sum.Sum(nil)), nil -} -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. 
-func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true - } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true + key = to.String((*keys.Keys)[0].Value) } - return bucket, false -} -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) - } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + var credential azblob.Credential + if key == "" { + credential = azblob.NewAnonymousCredential() + } else { + credential, err = azblob.NewSharedKeyCredential(accountName, key) + if err != nil { + return nil, fmt.Errorf("invalid credentials: %w", err) + } } - return nil + + blobPipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) + blobContainer := azblob.NewContainerURL(*endpointURL, blobPipeline) + return &blobContainer, nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. +func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { + var client *gcp.GCPClient + var err error + if secret != nil { + if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { + return nil, err + } + client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) if err != nil { - log.Error(err, "unable to send event") - return + return nil, err } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return + } else { + client, err = gcp.NewClient(ctx) + if err != nil { + return nil, err } } + return client, nil } -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string + +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. 
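+//
+// For illustration (hypothetical keys and etags):
+//
+//	index := etagIndex{"a/b.yaml": "etag1", "c.txt": "etag2"}
+//	rev, _ := index.Revision()
+//	// rev is the hex-encoded SHA256 of "a/b.yaml etag1\nc.txt etag2\n"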
+func (i etagIndex) Revision() (string, error) {
+	keyIndex := make([]string, 0, len(i))
+	for k := range i {
+		keyIndex = append(keyIndex, k)
 	}
-	if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil {
-		r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero())
-	} else {
-		r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
-			Type:   meta.ReadyCondition,
-			Status: metav1.ConditionUnknown,
-		}, !bucket.DeletionTimestamp.IsZero())
+	sort.Strings(keyIndex)
+	sum := sha256.New()
+	for _, k := range keyIndex {
+		if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil {
+			return "", err
+		}
 	}
+	return fmt.Sprintf("%x", sum.Sum(nil)), nil
 }

-func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) {
-	if r.MetricsRecorder == nil {
-		return
-	}
-	log := ctrl.LoggerFrom(ctx)
-
-	objRef, err := reference.GetReference(r.Scheme, &bucket)
-	if err != nil {
-		log.Error(err, "unable to record suspended metric")
-		return
-	}
-
-	if !bucket.DeletionTimestamp.IsZero() {
-		r.MetricsRecorder.RecordSuspend(*objRef, false)
+// eventLogf records an event and logs it at the same time. This log differs
+// from the debug log in the event recorder in that it is a simple log, while
+// the event recorder's debug log contains the complete details about the event.
+func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
+	msg := fmt.Sprintf(messageFmt, args...)
+	// Log and emit event.
+	if eventType == corev1.EventTypeWarning {
+		ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
 	} else {
-		r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend)
-	}
-}
-
-func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error {
-	var bucket sourcev1.Bucket
-	if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil {
-		return err
+		ctrl.LoggerFrom(ctx).Info(msg)
 	}
-
-	patch := client.MergeFrom(bucket.DeepCopy())
-	bucket.Status = newStatus
-
-	return r.Status().Patch(ctx, &bucket, patch)
+	r.Eventf(obj, eventType, reason, msg)
 }
diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go
index 01ff20d87..431322ea1 100644
--- a/controllers/bucket_controller_test.go
+++ b/controllers/bucket_controller_test.go
@@ -17,59 +17,1020 @@ limitations under the License.
 package controllers

 import (
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
 	"os"
+	"path"
 	"path/filepath"
+	"strings"
 	"testing"
+	"time"
+
+	"github.com/darkowlzz/controller-check/status"
+	"github.com/go-logr/logr"
+	. "github.com/onsi/gomega"
+	raw "google.golang.org/api/storage/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/record"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	"github.com/fluxcd/pkg/apis/meta"
+	"github.com/fluxcd/pkg/runtime/conditions"
+
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
+	sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
 )

-func TestBucketReconciler_checksum(t *testing.T) {
+// Environment variable to set the GCP Storage host for the GCP client.
+const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. 
+ condns := &status.Conditions{NegativePolarity: bucketReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, }, { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "No artifact for resource in storage"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: 
"3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, "") + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + }) + } +} + +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, "failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + 
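+			// The artifact revision set above matches the revision calculated
+			// for the single object below, so the source should be reported as
+			// up-to-date: no new artifact and no new conditions.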
bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", 
+ Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. 
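+		// Note: the test runner below additionally exports ENV_GCP_STORAGE_HOST,
+		// so that the GCP storage client resolves the mock server instead of the
+		// real API endpoint.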
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, } - defer os.RemoveAll(root) + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + if tt.beforeFunc != nil { - tt.beforeFunc(root) + tt.beforeFunc(obj) } - got, err := (&BucketReconciler{}).checksum(root) + + // Set the GCP storage host to be used by the GCP client. + g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
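+	// (Assumed to be the SHA256 sum of the tar.gz the test storage produces
+	// for the "existing" revision fixture; it has to be updated whenever the
+	// archived contents change.)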
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // path. 
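+				// reconcileArtifact should then fail early on the
+				// non-directory path, before any archiving is attempted.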
+ t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) + } + + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. Check artifacts only on successful + // reconcile. + if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + t.Errorf("revision() got = %v, want %v", got, tt.want) } }) } } +// helpers + func mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +1041,264 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + 
BucketName string
+	Objects    []*s3MockObject
+}
+
+func newS3Server(bucketName string) *s3MockServer {
+	s := &s3MockServer{BucketName: bucketName}
+	s.mux = http.NewServeMux()
+	s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler))
+
+	s.srv = httptest.NewUnstartedServer(s.mux)
+
+	return s
+}
+
+func (s *s3MockServer) Start() {
+	s.srv.Start()
+}
+
+func (s *s3MockServer) Stop() {
+	s.srv.Close()
+}
+
+func (s *s3MockServer) HTTPAddress() string {
+	return s.srv.URL
+}
+
+func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) {
+	key := path.Base(r.URL.Path)
+
+	switch key {
+	case s.BucketName:
+		w.Header().Add("Content-Type", "application/xml")
+
+		if r.Method == http.MethodHead {
+			return
+		}
+
+		q := r.URL.Query()
+
+		if q["location"] != nil {
+			fmt.Fprint(w, `
+<?xml version="1.0" encoding="UTF-8"?>
+<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">Europe</LocationConstraint>
+			`)
+			return
+		}
+
+		contents := ""
+		for _, o := range s.Objects {
+			etag := md5.Sum(o.Content)
+			contents += fmt.Sprintf(`
+		<Contents>
+			<Key>%s</Key>
+			<LastModified>%s</LastModified>
+			<Size>%d</Size>
+			<ETag>&quot;%b&quot;</ETag>
+			<StorageClass>STANDARD</StorageClass>
+		</Contents>`, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag)
+		}
+
+		fmt.Fprintf(w, `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+	<Name>%s</Name>
+	<Prefix/>
+	<Marker/>
+	<KeyCount>%d</KeyCount>
+	<MaxKeys>1000</MaxKeys>
+	<IsTruncated>false</IsTruncated>
+	%s
+</ListBucketResult>
+		`, s.BucketName, len(s.Objects), contents)
+	default:
+		key, err := filepath.Rel("/"+s.BucketName, r.URL.Path)
+		if err != nil {
+			w.WriteHeader(500)
+			return
+		}
+
+		var found *s3MockObject
+		for _, o := range s.Objects {
+			if key == o.Key {
+				found = o
+			}
+		}
+		if found == nil {
+			w.WriteHeader(404)
+			return
+		}
+
+		etag := md5.Sum(found.Content)
+		lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1)
+
+		w.Header().Add("Content-Type", found.ContentType)
+		w.Header().Add("Last-Modified", lastModified)
+		w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag))
+		w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content)))
+
+		if r.Method == http.MethodHead {
+			return
+		}
+
+		w.Write(found.Content)
+	}
+}
+
+type gcpMockObject struct {
+	Key         string
+	ContentType string
+	Content     []byte
+}
+
+type gcpMockServer struct {
+	srv *httptest.Server
+	mux *http.ServeMux
+
+	BucketName string
+	Etag       string
+	Objects    []*gcpMockObject
+	Close      func()
+}
+
+func newGCPServer(bucketName string) *gcpMockServer {
+	s := &gcpMockServer{BucketName: bucketName}
+	s.mux = http.NewServeMux()
+	s.mux.Handle("/", http.HandlerFunc(s.handler))
+
+	s.srv = httptest.NewUnstartedServer(s.mux)
+
+	return s
+}
+
+func (gs *gcpMockServer) Start() {
+	gs.srv.Start()
+}
+
+func (gs *gcpMockServer) Stop() {
+	gs.srv.Close()
+}
+
+func (gs *gcpMockServer) HTTPAddress() string {
+	return gs.srv.URL
+}
+
+func (gs *gcpMockServer) GetAllObjects() *raw.Objects {
+	objs := &raw.Objects{}
+	for _, o := range gs.Objects {
+		objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o))
+	}
+	return objs
+}
+
+func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) {
+	for _, o := range gs.Objects {
+		if o.Key == key {
+			return o.Content, nil
+		}
+	}
+	return nil, fmt.Errorf("not found")
+}
+
+func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) {
+	if strings.HasPrefix(r.RequestURI, "/b/") {
+		// Handle the bucket info related queries.
+		if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) {
+			// Return info about the bucket.
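+			// (The raw.Bucket constructed by getGCPBucket below is marshalled
+			// as the JSON the GCS client expects for a bucket resource.)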
+			response := getGCPBucket(gs.BucketName, gs.Etag)
+			jsonResponse, err := json.Marshal(response)
+			if err != nil {
+				w.WriteHeader(500)
+				return
+			}
+			w.WriteHeader(200)
+			w.Write(jsonResponse)
+			return
+		} else if strings.Contains(r.RequestURI, "/o/") {
+			// Return info about object in the bucket.
+			var obj *gcpMockObject
+			for _, o := range gs.Objects {
+				// The object key in the URI is escaped.
+				// e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full
+				if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) {
+					obj = o
+				}
+			}
+			if obj != nil {
+				response := getGCPObject(gs.BucketName, *obj)
+				jsonResponse, err := json.Marshal(response)
+				if err != nil {
+					w.WriteHeader(500)
+					return
+				}
+				w.WriteHeader(200)
+				w.Write(jsonResponse)
+				return
+			}
+			w.WriteHeader(404)
+			return
+		} else if strings.Contains(r.RequestURI, "/o?") {
+			// Return info about all the objects in the bucket.
+			response := gs.GetAllObjects()
+			jsonResponse, err := json.Marshal(response)
+			if err != nil {
+				w.WriteHeader(500)
+				return
+			}
+			w.WriteHeader(200)
+			w.Write(jsonResponse)
+			return
+		}
+		w.WriteHeader(404)
+		return
+	} else {
+		// Handle object file query.
+		bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName)
+		if strings.HasPrefix(r.RequestURI, bucketPrefix) {
+			// The URL path is of the format /<bucket>/included/file.txt.
+			// Extract the object key by discarding the bucket prefix.
+			key := strings.TrimPrefix(r.URL.Path, bucketPrefix)
+			// Handle returning object file in a bucket.
+			response, err := gs.GetObjectFile(key)
+			if err != nil {
+				w.WriteHeader(404)
+				return
+			}
+			w.WriteHeader(200)
+			w.Write(response)
+			return
+		}
+		w.WriteHeader(404)
+		return
+	}
+}
+
+func getGCPObject(bucket string, obj gcpMockObject) *raw.Object {
+	return &raw.Object{
+		Bucket:      bucket,
+		Name:        obj.Key,
+		ContentType: obj.ContentType,
+	}
+}
+
+func getGCPBucket(name, eTag string) *raw.Bucket {
+	return &raw.Bucket{
+		Name:     name,
+		Location: "loc",
+		Etag:     eTag,
+	}
+}
diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go
index 7642a1614..d2e92d5bb 100644
--- a/controllers/gitrepository_controller.go
+++ b/controllers/gitrepository_controller.go
@@ -122,7 +122,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	// check dependencies
 	if len(repository.Spec.Include) > 0 {
 		if err := r.checkDependencies(repository); err != nil {
-			repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error())
+			repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error())
 			if err := r.updateStatus(ctx, req, repository.Status); err != nil {
 				log.Error(err, "unable to update status for dependency not ready")
 				return ctrl.Result{Requeue: true}, err
@@ -284,7 +284,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour
 		var gr sourcev1.GitRepository
 		err := r.Get(context.Background(), dName, &gr)
 		if err != nil {
-			return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err
+			return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err
 		}
 		includedArtifacts = append(includedArtifacts, gr.GetArtifact())
 	}
@@ -329,11 +329,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour
 	for i, incl := range repository.Spec.Include {
 		toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath())
 		if err != nil {
-
return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } } @@ -423,22 +423,11 @@ func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index e63f8e458..46da4db08 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -634,21 +634,11 @@ func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { // event emits a Kubernetes event and forwards the event to notification // controller if configured. 
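+// Note: the updated event recorder API takes the object itself, which is
+// why the reference.GetReference plumbing removed below is no longer needed.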
func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, "Normal", severity, msg) + r.EventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b15..f762fcd08 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -129,9 +129,9 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -146,7 +146,7 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) When("Setting valid valuesFiles attribute", func() { @@ -161,12 +161,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -184,12 +184,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := 
loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -207,12 +207,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -228,12 +228,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -250,12 +250,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -271,12 +271,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := 
loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -682,7 +682,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Committing a new version in the chart metadata") @@ -727,9 +727,9 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*now.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values).ToNot(BeNil()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) @@ -744,7 +744,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) @@ -762,12 +762,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -785,12 +785,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -808,12 +808,12 @@ var _ = 
Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -834,16 +834,16 @@ var _ = Describe("HelmChartReconciler", func() { // Use status condition to be sure. for _, condn := range got.Status.Conditions { if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } } return false }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -860,12 +860,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -970,7 +970,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) }) }) @@ -1213,9 +1213,9 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -1232,12 +1232,12 
@@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1255,12 +1255,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1278,12 +1278,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1299,12 +1299,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -1321,12 +1321,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = 
k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index d82bdad69..a03f6b61b 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -335,21 +335,11 @@ func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a60..69a3656e9 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Updating the chart index") @@ -112,7 +112,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) updated := &sourcev1.HelmRepository{} @@ -291,7 +291,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") @@ -385,7 +385,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil 
&& - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go new file mode 100644 index 000000000..4b4d1f274 --- /dev/null +++ b/controllers/legacy_suite_test.go @@ -0,0 +1,190 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "math/rand" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var ginkgoTestEnv *envtest.Environment +var ginkgoTestStorage *Storage + +var examplePublicKey []byte +var examplePrivateKey []byte +var exampleCA []byte +var lctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger( + zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), + ) + lctx, cancel = context.WithCancel(ctx) + + By("bootstrapping test environment") + t := true + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + ginkgoTestEnv = &envtest.Environment{ + UseExistingCluster: &t, + } + } else { + ginkgoTestEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + } + + var err error + cfg, err = ginkgoTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = sourcev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + Expect(loadExampleKeys()).To(Succeed()) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") + + ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: 
scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&GitRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") + + err = (&HelmRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") + + err = (&HelmChartReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + + go func() { + err = k8sManager.Start(lctx) + Expect(err).ToNot(HaveOccurred()) + }() + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + defer GinkgoRecover() + cancel() + if ginkgoTestStorage != nil { + err := os.RemoveAll(ginkgoTestStorage.BasePath) + Expect(err).NotTo(HaveOccurred()) + } + err := ginkgoTestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func loadExampleKeys() (err error) { + examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + return err + } + examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + return err + } + exampleCA, err = os.ReadFile("testdata/certs/ca.pem") + return err +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/storage.go b/controllers/storage.go index 5c1f7be02..d58508f40 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9520bcbb6..02426e4ed 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,179 +17,138 @@ limitations under the License. 
 package controllers

 import (
-	"context"
+	"fmt"
 	"math/rand"
-	"net/http"
 	"os"
 	"path/filepath"
 	"testing"
 	"time"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-	"helm.sh/helm/v3/pkg/getter"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/envtest"
-	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/fluxcd/pkg/runtime/controller"
+	"github.com/fluxcd/pkg/runtime/testenv"
+	"github.com/fluxcd/pkg/testserver"

 	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
 	// +kubebuilder:scaffold:imports
 )

-// These tests use Ginkgo (BDD-style Go testing framework). Refer to
-// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+// These tests make use of plain Go using Gomega for assertions.
+// At the beginning of every (sub)test Gomega can be initialized
+// using gomega.NewWithT.
+// Refer to http://onsi.github.io/gomega/ to learn more about
+// Gomega.

-var cfg *rest.Config
-var k8sClient client.Client
-var k8sManager ctrl.Manager
-var testEnv *envtest.Environment
-var storage *Storage
+const (
+	timeout  = 10 * time.Second
+	interval = 1 * time.Second
+)

-var examplePublicKey []byte
-var examplePrivateKey []byte
-var exampleCA []byte
-var ctx context.Context
-var cancel context.CancelFunc
+var (
+	testEnv      *testenv.Environment
+	testStorage  *Storage
+	testServer   *testserver.ArtifactServer
+	testMetricsH controller.Metrics
+	ctx          = ctrl.SetupSignalHandler()
+)

-func TestAPIs(t *testing.T) {
-	RegisterFailHandler(Fail)
+var (
+	tlsPublicKey  []byte
+	tlsPrivateKey []byte
+	tlsCA         []byte
+)

-	RunSpecsWithDefaultAndCustomReporters(t,
-		"Controller Suite",
-		[]Reporter{printer.NewlineReporter{}})
+func init() {
+	rand.Seed(time.Now().UnixNano())
 }

-var _ = BeforeSuite(func(done Done) {
-	logf.SetLogger(
-		zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)),
-	)
-	ctx, cancel = context.WithCancel(context.TODO())
-
-	By("bootstrapping test environment")
-	t := true
-	if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
-		testEnv = &envtest.Environment{
-			UseExistingCluster: &t,
-		}
-	} else {
-		testEnv = &envtest.Environment{
-			CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
-		}
-	}
+func TestMain(m *testing.M) {
+	initTestTLS()
+
+	utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme))
+
+	testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases")))

 	var err error
-	cfg, err = testEnv.Start()
-	Expect(err).ToNot(HaveOccurred())
-	Expect(cfg).ToNot(BeNil())
-
-	err = sourcev1.AddToScheme(scheme.Scheme)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = sourcev1.AddToScheme(scheme.Scheme)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = sourcev1.AddToScheme(scheme.Scheme)
-	Expect(err).NotTo(HaveOccurred())
-
-	// +kubebuilder:scaffold:scheme
-
-	Expect(loadExampleKeys()).To(Succeed())
-
-	tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-")
-	Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir")
-
-	storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30)
-	Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage")
-	// serve artifacts from the filesystem, as done in main.go
-	fs := http.FileServer(http.Dir(tmpStoragePath))
-	http.Handle("/", fs)
-	go http.ListenAndServe(":5050", nil)
-
-	k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
-		Scheme: scheme.Scheme,
-	})
-	Expect(err).ToNot(HaveOccurred())
-
-	err = (&GitRepositoryReconciler{
-		Client:  k8sManager.GetClient(),
-		Scheme:  scheme.Scheme,
-		Storage: storage,
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler")
-
-	err = (&HelmRepositoryReconciler{
-		Client:  k8sManager.GetClient(),
-		Scheme:  scheme.Scheme,
-		Storage: storage,
-		Getters: getter.Providers{getter.Provider{
-			Schemes: []string{"http", "https"},
-			New:     getter.NewHTTPGetter,
-		}},
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler")
-
-	err = (&HelmChartReconciler{
-		Client:  k8sManager.GetClient(),
-		Scheme:  scheme.Scheme,
-		Storage: storage,
-		Getters: getter.Providers{getter.Provider{
-			Schemes: []string{"http", "https"},
-			New:     getter.NewHTTPGetter,
-		}},
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler")
+	testServer, err = testserver.NewTempArtifactServer()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err))
+	}
+	fmt.Println("Starting the test storage server")
+	testServer.Start()
+
+	testStorage, err = newTestStorage(testServer.HTTPServer)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create a test storage: %v", err))
+	}
+
+	testMetricsH = controller.MustMakeMetrics(testEnv)
+
+	//if err := (&GitRepositoryReconciler{
+	//	Client:  testEnv,
+	//	Metrics: testMetricsH,
+	//	Storage: testStorage,
+	//}).SetupWithManager(testEnv); err != nil {
+	//	panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err))
+	//}
+
+	if err := (&BucketReconciler{
+		Client:        testEnv,
+		EventRecorder: record.NewFakeRecorder(32),
+		Metrics:       testMetricsH,
+		Storage:       testStorage,
+	}).SetupWithManager(testEnv); err != nil {
+		panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err))
+	}

 	go func() {
-		defer GinkgoRecover()
-		err = k8sManager.Start(ctx)
-		Expect(err).ToNot(HaveOccurred())
+		fmt.Println("Starting the test environment")
+		if err := testEnv.Start(ctx); err != nil {
+			panic(fmt.Sprintf("Failed to start the test environment manager: %v", err))
+		}
 	}()
+	<-testEnv.Manager.Elected()

-	k8sClient = k8sManager.GetClient()
-	Expect(k8sClient).ToNot(BeNil())
+	code := m.Run()

-	close(done)
-}, 60)
+	fmt.Println("Stopping the test environment")
+	if err := testEnv.Stop(); err != nil {
+		panic(fmt.Sprintf("Failed to stop the test environment: %v", err))
+	}

-var _ = AfterSuite(func() {
-	cancel()
-	By("tearing down the test environment")
-	if storage != nil {
-		err := os.RemoveAll(storage.BasePath)
-		Expect(err).NotTo(HaveOccurred())
+	fmt.Println("Stopping the storage server")
+	testServer.Stop()
+	if err := os.RemoveAll(testServer.Root()); err != nil {
+		panic(fmt.Sprintf("Failed to remove storage server dir: %v", err))
 	}
-	err := testEnv.Stop()
-	Expect(err).ToNot(HaveOccurred())
-})

-func init() {
-	rand.Seed(time.Now().UnixNano())
+	os.Exit(code)
 }

-func loadExampleKeys() (err error) {
-	examplePublicKey, err = os.ReadFile("testdata/certs/server.pem")
+func initTestTLS() {
+	var err error
+	tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem")
+	if err != nil {
+		panic(err)
+	}
+	tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem")
 	if err != nil {
-		return err
+		panic(err)
 	}
-	examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem")
+	tlsCA, err = os.ReadFile("testdata/certs/ca.pem")
 	if err != nil {
-		return err
+		panic(err)
 	}
-	exampleCA, err = os.ReadFile("testdata/certs/ca.pem")
-	return err
 }

-var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
-
-func randStringRunes(n int) string {
-	b := make([]rune, n)
-	for i := range b {
-		b[i] = letterRunes[rand.Intn(len(letterRunes))]
+func newTestStorage(s *testserver.HTTPServer) (*Storage, error) {
+	storage, err := NewStorage(s.Root(), s.URL(), timeout)
+	if err != nil {
+		return nil, err
 	}
-	return string(b)
+	return storage, nil
 }
diff --git a/docs/api/source.md b/docs/api/source.md
index 8cc4f8ddf..4a506c472 100644
--- a/docs/api/source.md
+++ b/docs/api/source.md
@@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

+For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.
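To make the documented contract concrete, here is a minimal Go sketch of the two Secret layouts. The secret names, namespace, and values are hypothetical; only the data keys (`username`/`password` and `identity`/`identity.pub`/`known_hosts`) come from the documentation above.

```go
package examples

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// httpsGitCredentials sketches the Secret layout for HTTPS repositories:
// basic-auth style username and password keys.
func httpsGitCredentials() *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "https-credentials", Namespace: "default"},
		StringData: map[string]string{
			"username": "git",
			"password": "<personal-access-token>",
		},
	}
}

// sshGitCredentials sketches the Secret layout for SSH repositories:
// a private key, its public counterpart, and the known_hosts entries.
func sshGitCredentials(identity, identityPub, knownHosts []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "ssh-credentials", Namespace: "default"},
		Data: map[string][]byte{
			"identity":     identity,
			"identity.pub": identityPub,
			"known_hosts":  knownHosts,
		},
	}
}
```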

@@ -372,7 +370,7 @@ GitRepositoryVerification (Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

+

Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.
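For illustration, a minimal sketch of a spec that enables this verification, using the v1beta1 types this repository exposes; the URL and secret name are hypothetical:

```go
package examples

import (
	"github.com/fluxcd/pkg/apis/meta"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)

// verifiedRepoSpec asks the controller to verify the OpenPGP signature of the
// commit HEAD points to against keys in the referenced Secret.
func verifiedRepoSpec() sourcev1.GitRepositorySpec {
	return sourcev1.GitRepositorySpec{
		URL: "https://example.com/org/repo",
		Verification: &sourcev1.GitRepositoryVerification{
			Mode:      "head", // currently the only documented mode, see Mode below
			SecretRef: meta.LocalObjectReference{Name: "pgp-public-keys"},
		},
	}
}
```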

@@ -384,9 +382,8 @@ string (Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.
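A hedged sketch of overriding those patterns in Go; `Ignore` is a `*string` in the v1beta1 API, and the pattern set below is a common ".sourceignore" idiom, not a default from this PR:

```go
package examples

import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"

// withIgnore replaces the default exclusions with .gitignore-style patterns
// that keep only the deploy directory in the produced artifact.
func withIgnore(spec sourcev1.GitRepositorySpec) sourcev1.GitRepositorySpec {
	ignore := `# exclude everything except the deploy directory
/*
!/deploy/
`
	spec.Ignore = &ignore
	return spec
}
```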

@@ -398,7 +395,8 @@ bool (Optional) -

This flag tells the controller to suspend the reconciliation of this source.

+

Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.

@@ -423,8 +421,7 @@ bool (Optional) -

When enabled, after the clone is created, initializes all submodules within, -using their default settings. +

When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.

@@ -438,7 +435,8 @@ This option is available only when using the ‘go-git’ GitImplementat -

Extra git repositories to map into the repository

+

Include defines a list of GitRepository resources whose artifacts should be included in the artifact produced for +this resource.
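A sketch of such an include entry, with field names as they appear in the v1beta1 API of this repository; the referenced resource name and paths are hypothetical:

```go
package examples

import (
	"github.com/fluxcd/pkg/apis/meta"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)

// withInclude copies the "deploy" directory from another GitRepository's
// artifact into "shared/deploy" of the artifact built for this resource.
func withInclude(spec sourcev1.GitRepositorySpec) sourcev1.GitRepositorySpec {
	spec.Include = []sourcev1.GitRepositoryInclude{{
		GitRepositoryRef: meta.LocalObjectReference{Name: "shared-manifests"},
		FromPath:         "deploy",
		ToPath:           "shared/deploy",
	}}
	return spec
}
```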

@@ -1349,10 +1347,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

+For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.

@@ -1408,7 +1404,7 @@ GitRepositoryVerification (Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

+

Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.

@@ -1420,9 +1416,8 @@ string (Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.

@@ -1434,7 +1429,8 @@ bool (Optional) -

This flag tells the controller to suspend the reconciliation of this source.

+

Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.

@@ -1459,8 +1455,7 @@ bool (Optional) -

When enabled, after the clone is created, initializes all submodules within, -using their default settings. +

When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.

@@ -1474,7 +1469,8 @@ This option is available only when using the ‘go-git’ GitImplementat -

Extra git repositories to map into the repository

+

Include defines a list of GitRepository resources whose artifacts should be included in the artifact produced for +this resource.

@@ -1547,8 +1543,7 @@ string (Optional) -

URL is the download link for the artifact output of the last repository -sync.

+

URL is the download link for the artifact output of the last repository sync.
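The controllers/storage.go hunk earlier in this diff changes how this URL is composed: a `Storage.Hostname` that already carries an http(s) scheme is now used verbatim instead of being prefixed with `http://`. A standalone sketch of the resulting logic, with hypothetical hostnames:

```go
package examples

import (
	"fmt"
	"strings"
)

// artifactURL mirrors the updated Storage.SetArtifactURL composition.
func artifactURL(hostname, path string) string {
	format := "http://%s/%s"
	if strings.HasPrefix(hostname, "http://") || strings.HasPrefix(hostname, "https://") {
		format = "%s/%s"
	}
	return fmt.Sprintf(format, hostname, strings.TrimLeft(path, "/"))
}

// artifactURL("localhost:9090", "/gitrepository/default/repo/a1b2.tar.gz")
//   => "http://localhost:9090/gitrepository/default/repo/a1b2.tar.gz"
// artifactURL("https://source.example.com", "gitrepository/default/repo/a1b2.tar.gz")
//   => "https://source.example.com/gitrepository/default/repo/a1b2.tar.gz"
```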

@@ -1623,7 +1618,7 @@ string -

Mode describes what git object should be verified, currently (‘head’).

+

Mode describes what Git object should be verified, currently (‘head’).

@@ -1636,7 +1631,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference -

The secret name containing the public keys of all trusted Git authors.

+

SecretRef containing the public keys of all trusted Git authors.
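A sketch of that Secret; per the Flux documentation the values are expected to be ASCII-armored OpenPGP public keys, and the entry names shown here are hypothetical:

```go
package examples

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pgpPublicKeys sketches the Secret holding the trusted authors' public keys.
func pgpPublicKeys(author1, author2 []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "pgp-public-keys", Namespace: "default"},
		Data: map[string][]byte{
			"author1.asc": author1, // ASCII-armored public key
			"author2.asc": author2,
		},
	}
}
```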

diff --git a/go.mod b/go.mod
index 1fe12d2d4..465ca68bc 100644
--- a/go.mod
+++ b/go.mod
@@ -6,17 +6,25 @@ replace github.com/fluxcd/source-controller/api => ./api

 require (
 	cloud.google.com/go/storage v1.16.0
+	github.com/Azure/aad-pod-identity v1.8.6
+	github.com/Azure/azure-sdk-for-go v60.0.0+incompatible
+	github.com/Azure/azure-storage-blob-go v0.14.0
+	github.com/Azure/go-autorest/autorest v0.11.23
+	github.com/Azure/go-autorest/autorest/adal v0.9.18
+	github.com/Azure/go-autorest/autorest/to v0.4.0
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7
 	github.com/cyphar/filepath-securejoin v0.2.2
+	github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c
 	github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b
-	github.com/fluxcd/pkg/apis/meta v0.10.2
+	github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3
 	github.com/fluxcd/pkg/gittestserver v0.5.0
 	github.com/fluxcd/pkg/gitutil v0.1.0
 	github.com/fluxcd/pkg/helmtestserver v0.4.0
 	github.com/fluxcd/pkg/lockedfile v0.1.0
-	github.com/fluxcd/pkg/runtime v0.12.3
+	github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d
 	github.com/fluxcd/pkg/ssh v0.2.0
+	github.com/fluxcd/pkg/testserver v0.2.0
 	github.com/fluxcd/pkg/untar v0.1.0
 	github.com/fluxcd/pkg/version v0.1.0
 	github.com/fluxcd/source-controller/api v0.20.1
@@ -29,21 +37,29 @@ require (
 	github.com/onsi/gomega v1.17.0
 	github.com/otiai10/copy v1.7.0
 	github.com/spf13/pflag v1.0.5
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	google.golang.org/api v0.54.0
+	gopkg.in/yaml.v2 v2.4.0
 	gotest.tools v2.2.0+incompatible
 	helm.sh/helm/v3 v3.7.2
 	k8s.io/api v0.23.1
 	k8s.io/apimachinery v0.23.1
 	k8s.io/client-go v0.23.1
+	k8s.io/klog/v2 v2.30.0
 	sigs.k8s.io/controller-runtime v0.11.0
 	sigs.k8s.io/yaml v1.3.0
 )

 require (
 	cloud.google.com/go v0.90.0 // indirect
+	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
@@ -77,7 +93,6 @@ require (
 	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
 	github.com/fatih/color v1.7.0 // indirect
 	github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect
-	github.com/fluxcd/pkg/testserver v0.1.0 // indirect
 	github.com/fsnotify/fsnotify v1.5.1 // indirect
 	github.com/go-errors/errors v1.0.1 // indirect
 	github.com/go-git/gcfg v1.5.0 // indirect
@@ -88,6 +103,7 @@ require (
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gofrs/uuid v4.1.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
@@ -114,12 +130,14 @@ require (
 	github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
 	github.com/klauspost/compress v1.13.5 // indirect
 	github.com/klauspost/cpuid v1.3.1 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/lib/pq v1.10.0 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-colorable v0.0.9 // indirect
+	github.com/mattn/go-ieproxy v0.0.1 // indirect
 	github.com/mattn/go-isatty v0.0.4 // indirect
 	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@@ -174,8 +192,8 @@ require (
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect
 	golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect
-	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
-	golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
+	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 // indirect
 	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
@@ -191,13 +209,11 @@ require (
 	gopkg.in/ini.v1 v1.62.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 	k8s.io/apiextensions-apiserver v0.23.0 // indirect
 	k8s.io/apiserver v0.23.0 // indirect
 	k8s.io/cli-runtime v0.23.0 // indirect
 	k8s.io/component-base v0.23.0 // indirect
-	k8s.io/klog/v2 v2.30.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
 	k8s.io/kubectl v0.22.4 // indirect
 	k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect
diff --git a/go.sum b/go.sum
index 69b819334..40bfdfa39 100644
--- a/go.sum
+++ b/go.sum
@@ -44,24 +44,51 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.16.0 h1:1UwAux2OZP4310YXg5ohqBEpV16Y93uZG4+qOX7K2Kg=
 cloud.google.com/go/storage v1.16.0/go.mod h1:ieKBmUyzcftN5tbxwnXClMKH00CfcQ+xL6NN0r5QfmE=
+contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/aad-pod-identity v1.8.6 h1:aa5ybqA5d2e18MPEQyXvIDDGmXMa6bN7ZxK5PKpWQoE=
+github.com/Azure/aad-pod-identity v1.8.6/go.mod h1:A+7rb0WOEhBmVaFSl/MtdVCiugoTilY7GpwCnrgzm2w=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v57.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v60.0.0+incompatible h1:vVRJhSSTwhIHQTzTjqoZCItFJeBwfdNSqHcgGV10FHQ=
+github.com/Azure/azure-sdk-for-go v60.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.23 h1:bRQWsW25/YkoxnIqXMPF94JW33qWDcrPMZ3bINaAruU=
+github.com/Azure/go-autorest/autorest v0.11.23/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
 github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.10/go.mod h1:zQXYYNX9kXzRMrJNVXWUfNy38oPMF5/2TeZ4Wylc9fE=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -209,6 +236,7 @@ github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.3.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -231,6 +259,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c h1:pyp/Dvd1gYP/D3z1zs46h0YhYzFp0hjxw0XVIO9+vh4=
+github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c/go.mod h1:haYO9UW76kUUKpIBbv3ydaU5wZ/7r0yqp61PGzVRSYU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -240,6 +270,8 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/distribution/distribution/v3 v3.0.0-20210804104954-38ab4c606ee3 h1:rEK0juuU5idazw//KzUcL3yYwUU3DIe2OnfJwjDBqno=
 github.com/distribution/distribution/v3 v3.0.0-20210804104954-38ab4c606ee3/go.mod h1:gt38b7cvVKazi5XkHvINNytZXgTEntyhtyM3HQz46Nk=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
@@ -298,8 +330,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc=
 github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU=
-github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI=
-github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I=
+github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g=
+github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI=
 github.com/fluxcd/pkg/gittestserver v0.5.0 h1:pPdaz7pUsukt4eQ+xQeNwoypOXGGOHFHnPjIHQAv0tE=
 github.com/fluxcd/pkg/gittestserver v0.5.0/go.mod h1:mFEF/Xrg+CjQH4VFCRCou2qZmhWKo7EYcjr7MIoX6+s=
 github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE=
@@ -308,12 +340,14 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0 h1:RT0G5buw5qrzEfIIH0fklppIvPAaQF//p
 github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglAU+fRTO/sPPV/Kj3gQ=
 github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo=
 github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8=
-github.com/fluxcd/pkg/runtime v0.12.3 h1:h21AZ3YG5MAP7DxFF9hfKrP+vFzys2L7CkUbPFjbP/0=
-github.com/fluxcd/pkg/runtime v0.12.3/go.mod h1:imJ2xYy/d4PbSinX2IefmZk+iS2c1P5fY0js8mCE4SM=
+github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg=
+github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d h1:RllfQpqtWKw+ZkrphMDIrX9J4X4u5am58T+rjVGkzsE=
+github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d/go.mod h1:uGPudgMUNC3wu7Zoh6AgJM8WSH3VpmnzjrwkVb86d3Y=
 github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8=
 github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA=
-github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA=
 github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw=
+github.com/fluxcd/pkg/testserver v0.2.0 h1:Mj0TapmKaywI6Fi5wvt1LAZpakUHmtzWQpJNKQ0Krt4=
+github.com/fluxcd/pkg/testserver v0.2.0/go.mod h1:bgjjydkXsZTeFzjz9Cr4heGANr41uTB1Aj1Q5qzuYVk=
 github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o=
 github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY=
 github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7ZEAQ=
@@ -401,6 +435,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -615,6 +651,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
@@ -646,6 +684,8 @@ github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
 github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
 github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -778,6 +818,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
@@ -790,6 +831,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
 github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
@@ -798,6 +840,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
 github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
 github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -1000,8 +1043,9 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1060,6 +1104,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1112,8 +1157,9 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1152,6 +1198,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1177,6 +1224,7 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1213,8 +1261,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
 golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 h1:WecRHqgE09JBkh/584XIE6PMz5KKE/vER4izNUi30AQ=
+golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
diff --git a/internal/error/error.go b/internal/error/error.go
new file mode 100644
index 000000000..65d1feb62
--- /dev/null
+++ b/internal/error/error.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package error
+
+// Stalling is the reconciliation stalled state error. It contains an error
+// and a reason for the stalled condition.
+type Stalling struct {
+    // Reason is the stalled condition reason string.
+    Reason string
+    // Err is the error that caused stalling. This can be used as the message
+    // in the stalled condition.
+    Err error
+}
+
+// Error implements the error interface.
+func (se *Stalling) Error() string {
+    return se.Err.Error()
+}
+
+// Unwrap returns the underlying error.
+func (se *Stalling) Unwrap() error {
+    return se.Err
+}
+
+// Event is an error event. It can be used to construct an event to be
+// recorded.
+type Event struct {
+    Reason string
+    Err    error
+}
+
+// Error implements the error interface.
+func (ee *Event) Error() string {
+    return ee.Err.Error()
+}
+
+// Unwrap returns the underlying error.
+func (ee *Event) Unwrap() error {
+    return ee.Err
+}
diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go
new file mode 100644
index 000000000..f7593fcee
--- /dev/null
+++ b/internal/reconcile/reconcile.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2021 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconcile
+
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    kuberecorder "k8s.io/client-go/tools/record"
+    ctrl "sigs.k8s.io/controller-runtime"
+
+    "github.com/fluxcd/pkg/apis/meta"
+    "github.com/fluxcd/pkg/runtime/conditions"
+    "github.com/fluxcd/pkg/runtime/patch"
+
+    sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
+    serror "github.com/fluxcd/source-controller/internal/error"
+)
+
+// Result is an abstraction over the controller-runtime reconcile Result to
+// simplify the possible result values.
+type Result int
+
+const (
+    // ResultEmpty indicates a reconcile result which does not requeue.
+    ResultEmpty Result = iota
+    // ResultRequeue indicates a reconcile result which should immediately
+    // requeue.
+    ResultRequeue
+    // ResultSuccess indicates a reconcile result which should be
+    // requeued on the interval as defined on the reconciled object.
+    ResultSuccess
+)
+
+// BuildRuntimeResult converts a given Result and error into the
+// return values of a controller's Reconcile function.
+func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) {
+    // NOTE: The return values can be modified based on the error type.
+    // For example, if an error signifies a short requeue period that's
+    // not equal to the requeue period of the object, the error can be checked
+    // and an appropriate result with the period can be returned.
+    //
+    // Example:
+    //  if e, ok := err.(*waitError); ok {
+    //      return ctrl.Result{RequeueAfter: e.RequeueAfter}, err
+    //  }
+
+    // Log and record event based on the error.
+    switch e := err.(type) {
+    case *serror.Event:
+        recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error())
+    case *serror.Stalling:
+        // Stalling errors are not returned to the runtime. Log them explicitly.
+        ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled")
+        recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error())
+    }
+
+    switch rr {
+    case ResultRequeue:
+        return ctrl.Result{Requeue: true}, err
+    case ResultSuccess:
+        return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err
+    default:
+        return ctrl.Result{}, err
+    }
+}
+
+// ComputeReconcileResult analyzes the reconcile results (result + error),
+// updates the status conditions of the object with any corrections and returns
+// the result patch configuration and any error to the caller. The caller is
+// responsible for using the patch option to patch the object in the API server.
+func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, ownedConditions []string) ([]patch.Option, error) {
+    // Remove reconciling condition on successful reconciliation.
+    if recErr == nil && res == ResultSuccess {
+        conditions.Delete(obj, meta.ReconcilingCondition)
+    }
+
+    // Patch the object, ignoring conflicts on the conditions owned by this controller.
+    pOpts := []patch.Option{
+        patch.WithOwnedConditions{
+            Conditions: ownedConditions,
+        },
+    }
+
+    // Analyze the reconcile error.
+    switch t := recErr.(type) {
+    case *serror.Stalling:
+        if res == ResultEmpty {
+            // The current generation has been reconciled successfully and it
+            // has resulted in a stalled state. Return no error to stop further
+            // requeuing.
+            pOpts = append(pOpts, patch.WithStatusObservedGeneration{})
+            conditions.MarkStalled(obj, t.Reason, t.Error())
+            return pOpts, nil
+        }
+        // NOTE: A non-empty result with a stalling error indicates that the
+        // returned result is incorrect.
+    case nil:
+        // The reconcile didn't result in any error, we are not in stalled
+        // state. If a requeue is requested, the current generation has not been
+        // reconciled successfully.
+        if res != ResultRequeue {
+            pOpts = append(pOpts, patch.WithStatusObservedGeneration{})
+        }
+        conditions.Delete(obj, meta.StalledCondition)
+    default:
+        // The reconcile resulted in some error, but we are not in stalled
+        // state.
+        conditions.Delete(obj, meta.StalledCondition)
+    }
+
+    return pOpts, recErr
+}
+
+// LowestRequeuingResult returns the Result with the lowest requeue
+// period.
+// Weighting:
+//  ResultRequeue - immediate requeue (lowest)
+//  ResultSuccess - requeue at an interval
+//  ResultEmpty - no requeue
+func LowestRequeuingResult(i, j Result) Result {
+    switch {
+    case i == ResultEmpty:
+        return j
+    case j == ResultEmpty:
+        return i
+    case i == ResultRequeue:
+        return i
+    case j == ResultRequeue:
+        return j
+    default:
+        return j
+    }
+}
diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go
new file mode 100644
index 000000000..bb0cf4c44
--- /dev/null
+++ b/internal/reconcile/reconcile_test.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2021 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconcile
+
+import (
+    "testing"
+
+    . "github.com/onsi/gomega"
+)
+
+func TestLowestRequeuingResult(t *testing.T) {
+    tests := []struct {
+        name       string
+        i          Result
+        j          Result
+        wantResult Result
+    }{
+        {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue},
+        {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess},
+        {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue},
+        {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue},
+        {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue},
+        {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess},
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            g := NewWithT(t)
+
+            g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult))
+        })
+    }
+}
diff --git a/main.go b/main.go
index 67f00a920..ea6e4c233 100644
--- a/main.go
+++ b/main.go
@@ -33,13 +33,12 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	ctrl "sigs.k8s.io/controller-runtime"
-	crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"

 	"github.com/fluxcd/pkg/runtime/client"
+	helper "github.com/fluxcd/pkg/runtime/controller"
 	"github.com/fluxcd/pkg/runtime/events"
 	"github.com/fluxcd/pkg/runtime/leaderelection"
 	"github.com/fluxcd/pkg/runtime/logger"
-	"github.com/fluxcd/pkg/runtime/metrics"
 	"github.com/fluxcd/pkg/runtime/pprof"
 	"github.com/fluxcd/pkg/runtime/probes"

@@ -77,6 +76,7 @@ func main() {
 		storagePath          string
 		storageAddr          string
 		storageAdvAddr       string
+		azureCloudConfig     string
 		concurrent           int
 		requeueDependency    time.Duration
 		watchAllNamespaces   bool
@@ -110,10 +110,14 @@ func main() {
 		"The max allowed size in bytes of a file in a Helm chart.")
 	flag.DurationVar(&requeueDependency, "requeue-dependency", 30*time.Second,
 		"The interval at which failing dependencies are reevaluated.")
+	flag.StringVar(&azureCloudConfig, "azure-cloud-config",
+		envOrDefault("AZURE_CLOUD_CONFIG", "/etc/kubernetes/azure.json"),
+		"Azure cloud config file.")
 	clientOptions.BindFlags(flag.CommandLine)
 	logOptions.BindFlags(flag.CommandLine)
 	leaderElectionOptions.BindFlags(flag.CommandLine)
+
 	flag.Parse()

 	ctrl.SetLogger(logger.NewLogger(logOptions))
@@ -123,18 +127,6 @@ func main() {
 	helm.MaxChartSize = helmChartLimit
 	helm.MaxChartFileSize = helmChartFileLimit

-	var eventRecorder *events.Recorder
-	if eventsAddr != "" {
-		var err error
-		if eventRecorder, err = events.NewRecorder(eventsAddr, controllerName); err != nil {
-			setupLog.Error(err, "unable to create event recorder")
-			os.Exit(1)
-		}
-	}
-
-	metricsRecorder := metrics.NewRecorder()
-	crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...)
-
 	watchNamespace := ""
 	if !watchAllNamespaces {
 		watchNamespace = os.Getenv("RUNTIME_NAMESPACE")
@@ -163,18 +155,25 @@ func main() {
 	probes.SetupChecks(mgr, setupLog)
 	pprof.SetupHandlers(mgr, setupLog)

+	var eventRecorder *events.Recorder
+	if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil {
+		setupLog.Error(err, "unable to create event recorder")
+		os.Exit(1)
+	}
+
+	metricsH := helper.MustMakeMetrics(mgr)
+
 	if storageAdvAddr == "" {
 		storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog)
 	}
 	storage := mustInitStorage(storagePath, storageAdvAddr, setupLog)

 	if err = (&controllers.GitRepositoryReconciler{
-		Client:                mgr.GetClient(),
-		Scheme:                mgr.GetScheme(),
-		Storage:               storage,
-		EventRecorder:         mgr.GetEventRecorderFor(controllerName),
-		ExternalEventRecorder: eventRecorder,
-		MetricsRecorder:       metricsRecorder,
+		Client:          mgr.GetClient(),
+		Scheme:          mgr.GetScheme(),
+		Storage:         storage,
+		EventRecorder:   eventRecorder,
+		MetricsRecorder: metricsH.MetricsRecorder,
 	}).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{
 		MaxConcurrentReconciles:   concurrent,
 		DependencyRequeueInterval: requeueDependency,
@@ -183,13 +182,12 @@ func main() {
 		os.Exit(1)
 	}
 	if err = (&controllers.HelmRepositoryReconciler{
-		Client:                mgr.GetClient(),
-		Scheme:                mgr.GetScheme(),
-		Storage:               storage,
-		Getters:               getters,
-		EventRecorder:         mgr.GetEventRecorderFor(controllerName),
-		ExternalEventRecorder: eventRecorder,
-		MetricsRecorder:       metricsRecorder,
+		Client:          mgr.GetClient(),
+		Scheme:          mgr.GetScheme(),
+		Storage:         storage,
+		Getters:         getters,
+		EventRecorder:   eventRecorder,
+		MetricsRecorder: metricsH.MetricsRecorder,
 	}).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{
 		MaxConcurrentReconciles: concurrent,
 	}); err != nil {
@@ -197,13 +195,12 @@ func main() {
 		os.Exit(1)
 	}
 	if err = (&controllers.HelmChartReconciler{
-		Client:                mgr.GetClient(),
-		Scheme:                mgr.GetScheme(),
-		Storage:               storage,
-		Getters:               getters,
-		EventRecorder:         mgr.GetEventRecorderFor(controllerName),
-		ExternalEventRecorder: eventRecorder,
-		MetricsRecorder:       metricsRecorder,
+		Client:          mgr.GetClient(),
+		Scheme:          mgr.GetScheme(),
+		Storage:         storage,
+		Getters:         getters,
+		EventRecorder:   eventRecorder,
+		MetricsRecorder: metricsH.MetricsRecorder,
 	}).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{
 		MaxConcurrentReconciles: concurrent,
 	}); err != nil {
@@ -211,12 +208,11 @@ func main() {
 		os.Exit(1)
 	}
 	if err = (&controllers.BucketReconciler{
-		Client:                mgr.GetClient(),
-		Scheme:                mgr.GetScheme(),
-		Storage:               storage,
-		EventRecorder:         mgr.GetEventRecorderFor(controllerName),
-		ExternalEventRecorder: eventRecorder,
-		MetricsRecorder:       metricsRecorder,
+		Client:           mgr.GetClient(),
+		EventRecorder:    eventRecorder,
+		Metrics:          metricsH,
+		Storage:          storage,
+		AzureCloudConfig: azureCloudConfig,
 	}).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{
 		MaxConcurrentReconciles: concurrent,
 	}); err != nil {
diff --git a/pkg/azure/cloudprovider/cloudprovider.go b/pkg/azure/cloudprovider/cloudprovider.go
new file mode 100644
index 000000000..b6d44ae51
--- /dev/null
+++ b/pkg/azure/cloudprovider/cloudprovider.go
@@ -0,0 +1,124 @@
+/*
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
+*/
+// based on https://github.com/Azure/aad-pod-identity/blob/0fbc00f8b572ee780199ddb4489a94f1f01d3815/pkg/cloudprovider/cloudprovider.go
+
+package cloudprovider
+
+import (
+    "fmt"
+    "os"
+    "strings"
+
+    "github.com/Azure/aad-pod-identity/pkg/config"
+    "github.com/Azure/aad-pod-identity/pkg/utils"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "gopkg.in/yaml.v2"
+    "k8s.io/klog/v2"
+)
+
+// Client is a cloud provider client.
+type Client struct {
+    Config     config.AzureConfig
+    configFile string
+    Authorizer autorest.Authorizer
+}
+
+// NewCloudProvider returns an Azure cloud provider client.
+func NewCloudProvider(configFile string) (*Client, error) {
+    client := &Client{
+        configFile: configFile,
+    }
+    if err := client.Init(); err != nil {
+        return nil, fmt.Errorf("failed to initialize cloud provider client, error: %+v", err)
+    }
+    return client, nil
+}
+
+// Init initializes the cloud provider client based
+// on a config path or environment variables.
+func (c *Client) Init() error {
+    c.Config = config.AzureConfig{}
+    if c.configFile != "" {
+        klog.V(6).Info("populating AzureConfig from azure.json")
+        bytes, err := os.ReadFile(c.configFile)
+        if err != nil {
+            return fmt.Errorf("failed to read config file %s, error: %+v", c.configFile, err)
+        }
+        if err = yaml.Unmarshal(bytes, &c.Config); err != nil {
+            return fmt.Errorf("failed to unmarshal JSON, error: %+v", err)
+        }
+    } else {
+        klog.V(6).Info("populating AzureConfig from secret/environment variables")
+        c.Config.Cloud = os.Getenv("CLOUD")
+        c.Config.TenantID = os.Getenv("TENANT_ID")
+        c.Config.ClientID = os.Getenv("CLIENT_ID")
+        c.Config.ClientSecret = os.Getenv("CLIENT_SECRET")
+        c.Config.SubscriptionID = os.Getenv("SUBSCRIPTION_ID")
+        c.Config.ResourceGroupName = os.Getenv("RESOURCE_GROUP")
+        c.Config.VMType = os.Getenv("VM_TYPE")
+        c.Config.UseManagedIdentityExtension = strings.EqualFold(os.Getenv("USE_MSI"), "True")
+        c.Config.UserAssignedIdentityID = os.Getenv("USER_ASSIGNED_MSI_CLIENT_ID")
+    }
+
+    azureEnv, err := azure.EnvironmentFromName(c.Config.Cloud)
+    if err != nil {
+        return fmt.Errorf("failed to get cloud environment, error: %+v", err)
+    }
+
+    err = adal.AddToUserAgent("flux-source-controller")
+    if err != nil {
+        return fmt.Errorf("failed to add flux-source-controller to user agent, error: %+v", err)
+    }
+
+    oauthConfig, err := adal.NewOAuthConfig(azureEnv.ActiveDirectoryEndpoint, c.Config.TenantID)
+    if err != nil {
+        return fmt.Errorf("failed to create OAuth config, error: %+v", err)
+    }
+
+    var spt *adal.ServicePrincipalToken
+    if c.Config.UseManagedIdentityExtension {
+        klog.Infof("using user assigned identity: %s for authentication.", utils.RedactClientID(c.Config.UserAssignedIdentityID))
+        spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(azureEnv.ResourceManagerEndpoint, &adal.ManagedIdentityOptions{
+            ClientID: c.Config.UserAssignedIdentityID,
+        })
+        if err != nil {
+            return fmt.Errorf("failed to get token from user-assigned identity, error: %+v", err)
+        }
+    } else { // This is the default scenario - use service principal to get the token.
+        spt, err = adal.NewServicePrincipalToken(
+            *oauthConfig,
+            c.Config.ClientID,
+            c.Config.ClientSecret,
+            azureEnv.ResourceManagerEndpoint,
+        )
+        if err != nil {
+            return fmt.Errorf("failed to get service principal token, error: %+v", err)
+        }
+    }
+
+    c.Authorizer = autorest.NewBearerAuthorizer(spt)
+    return nil
+}
diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go
index 9127fcde3..f98e498c4 100644
--- a/pkg/gcp/gcp.go
+++ b/pkg/gcp/gcp.go
@@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error {
 func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) {
 	_, err := c.Client.Bucket(bucketName).Attrs(ctx)
 	if err == gcpstorage.ErrBucketNotExist {
-		return false, err
+		// Not returning error to be compatible with minio's API.
+		return false, nil
 	}
 	if err != nil {
 		return false, err
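With the gcp.go change above, a missing bucket is reported as `(false, nil)` rather than as an error, matching the minio client used for generic buckets. A hedged sketch of how a caller can now branch on that pair; the import path follows this repository's layout, and the helper name is hypothetical:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/fluxcd/source-controller/pkg/gcp"
)

// requireBucket treats a missing bucket as an ordinary condition instead of a
// transport failure, which a reconciler can surface on the Bucket resource.
func requireBucket(ctx context.Context, client *gcp.GCPClient, name string) error {
	exists, err := client.BucketExists(ctx, name)
	if err != nil {
		// Only auth/transport failures reach this branch now.
		return fmt.Errorf("checking bucket %q: %w", name, err)
	}
	if !exists {
		return fmt.Errorf("bucket %q does not exist", name)
	}
	return nil
}
```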